author     Dimitry Andric <dim@FreeBSD.org>  2019-09-02 17:48:59 +0000
committer  Dimitry Andric <dim@FreeBSD.org>  2019-09-02 17:48:59 +0000
commit     1ac1019db8e01f50b831a8db925b91fd69a5f6ea (patch)
tree       cdefa66bea8bea1aea9be528598df875f8a40dc0
parent     464f838b7b7a19b95ae4b33010858de341c620a5 (diff)

Vendor import of llvm release_90 branch r370514 (tag: vendor/llvm/llvm-release_90-r370514)

Notes:
    svn path=/vendor/llvm/dist-release_90/; revision=351709
    svn path=/vendor/llvm/llvm-release_90-r370514/; revision=351710; tag=vendor/llvm/llvm-release_90-r370514
-rw-r--r--  include/llvm/Analysis/InstructionSimplify.h  | 11
-rw-r--r--  include/llvm/IR/InlineAsm.h                  |  1
-rw-r--r--  lib/Analysis/InstructionSimplify.cpp         | 30
-rw-r--r--  lib/IR/Core.cpp                              | 11
-rw-r--r--  lib/Target/AArch64/AArch64ISelLowering.cpp   | 10
-rw-r--r--  lib/Target/AArch64/AArch64ISelLowering.h     |  6
-rw-r--r--  lib/Target/AArch64/AArch64InstrInfo.cpp      | 12
-rw-r--r--  lib/Target/Mips/AsmParser/MipsAsmParser.cpp  | 19
-rw-r--r--  lib/Target/RISCV/RISCVISelDAGToDAG.cpp       |  3
-rw-r--r--  lib/Target/RISCV/RISCVISelLowering.cpp       | 17
-rw-r--r--  lib/Target/RISCV/RISCVISelLowering.h         |  3
-rw-r--r--  lib/Target/TargetMachine.cpp                 |  8
-rw-r--r--  lib/Target/X86/X86ISelLowering.cpp           | 16
-rw-r--r--  lib/Target/X86/X86ISelLowering.h             |  6
-rw-r--r--  lib/Target/X86/X86Subtarget.cpp              |  3
-rw-r--r--  lib/Transforms/Utils/LoopUnroll.cpp          |  8
16 files changed, 126 insertions, 38 deletions
diff --git a/include/llvm/Analysis/InstructionSimplify.h b/include/llvm/Analysis/InstructionSimplify.h
index 054ffca7215e..c36ad4c50df9 100644
--- a/include/llvm/Analysis/InstructionSimplify.h
+++ b/include/llvm/Analysis/InstructionSimplify.h
@@ -31,6 +31,7 @@
#ifndef LLVM_ANALYSIS_INSTRUCTIONSIMPLIFY_H
#define LLVM_ANALYSIS_INSTRUCTIONSIMPLIFY_H
+#include "llvm/ADT/SetVector.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/User.h"
@@ -263,12 +264,14 @@ Value *SimplifyInstruction(Instruction *I, const SimplifyQuery &Q,
/// This first performs a normal RAUW of I with SimpleV. It then recursively
/// attempts to simplify those users updated by the operation. The 'I'
/// instruction must not be equal to the simplified value 'SimpleV'.
+/// If UnsimplifiedUsers is provided, instructions that could not be simplified
+/// are added to it.
///
/// The function returns true if any simplifications were performed.
-bool replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV,
- const TargetLibraryInfo *TLI = nullptr,
- const DominatorTree *DT = nullptr,
- AssumptionCache *AC = nullptr);
+bool replaceAndRecursivelySimplify(
+ Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr, AssumptionCache *AC = nullptr,
+ SmallSetVector<Instruction *, 8> *UnsimplifiedUsers = nullptr);
/// Recursively attempt to simplify an instruction.
///
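For context, a minimal sketch of how a caller might use the new out-parameter; the worklist and the values I and V below are hypothetical, not part of this change:

    // Hypothetical caller: replace I with the known-simpler value V, then
    // collect the users that resisted simplification for later processing.
    SmallSetVector<Instruction *, 8> UnsimplifiedUsers;
    if (replaceAndRecursivelySimplify(I, V, /*TLI=*/nullptr, /*DT=*/nullptr,
                                      /*AC=*/nullptr, &UnsimplifiedUsers))
      for (Instruction *U : UnsimplifiedUsers)
        Worklist.push_back(U); // e.g. re-queue for another combine pass
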
diff --git a/include/llvm/IR/InlineAsm.h b/include/llvm/IR/InlineAsm.h
index 2aac807623a9..72d8ad1501ae 100644
--- a/include/llvm/IR/InlineAsm.h
+++ b/include/llvm/IR/InlineAsm.h
@@ -244,6 +244,7 @@ public:
Constraint_m,
Constraint_o,
Constraint_v,
+ Constraint_A,
Constraint_Q,
Constraint_R,
Constraint_S,
diff --git a/lib/Analysis/InstructionSimplify.cpp b/lib/Analysis/InstructionSimplify.cpp
index e34bf6f4e43f..941a68c5e6fd 100644
--- a/lib/Analysis/InstructionSimplify.cpp
+++ b/lib/Analysis/InstructionSimplify.cpp
@@ -5221,14 +5221,16 @@ Value *llvm::SimplifyInstruction(Instruction *I, const SimplifyQuery &SQ,
/// If we have a pre-simplified value in 'SimpleV', that is forcibly used to
/// replace the instruction 'I'. Otherwise, we simply add 'I' to the list of
/// instructions to process and attempt to simplify it using
-/// InstructionSimplify.
+/// InstructionSimplify. Recursively visited users which could not be
+/// simplified themselves are added to the optional UnsimplifiedUsers set for
+/// further processing by the caller.
///
/// This routine returns 'true' only when *it* simplifies something. The passed
/// in simplified value does not count toward this.
-static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV,
- const TargetLibraryInfo *TLI,
- const DominatorTree *DT,
- AssumptionCache *AC) {
+static bool replaceAndRecursivelySimplifyImpl(
+ Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI,
+ const DominatorTree *DT, AssumptionCache *AC,
+ SmallSetVector<Instruction *, 8> *UnsimplifiedUsers = nullptr) {
bool Simplified = false;
SmallSetVector<Instruction *, 8> Worklist;
const DataLayout &DL = I->getModule()->getDataLayout();
@@ -5258,8 +5260,11 @@ static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV,
// See if this instruction simplifies.
SimpleV = SimplifyInstruction(I, {DL, TLI, DT, AC});
- if (!SimpleV)
+ if (!SimpleV) {
+ if (UnsimplifiedUsers)
+ UnsimplifiedUsers->insert(I);
continue;
+ }
Simplified = true;
@@ -5285,16 +5290,17 @@ bool llvm::recursivelySimplifyInstruction(Instruction *I,
const TargetLibraryInfo *TLI,
const DominatorTree *DT,
AssumptionCache *AC) {
- return replaceAndRecursivelySimplifyImpl(I, nullptr, TLI, DT, AC);
+ return replaceAndRecursivelySimplifyImpl(I, nullptr, TLI, DT, AC, nullptr);
}
-bool llvm::replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV,
- const TargetLibraryInfo *TLI,
- const DominatorTree *DT,
- AssumptionCache *AC) {
+bool llvm::replaceAndRecursivelySimplify(
+ Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI,
+ const DominatorTree *DT, AssumptionCache *AC,
+ SmallSetVector<Instruction *, 8> *UnsimplifiedUsers) {
assert(I != SimpleV && "replaceAndRecursivelySimplify(X,X) is not valid!");
assert(SimpleV && "Must provide a simplified value.");
- return replaceAndRecursivelySimplifyImpl(I, SimpleV, TLI, DT, AC);
+ return replaceAndRecursivelySimplifyImpl(I, SimpleV, TLI, DT, AC,
+ UnsimplifiedUsers);
}
namespace llvm {
diff --git a/lib/IR/Core.cpp b/lib/IR/Core.cpp
index 310935b5213a..af9ba7b3ca69 100644
--- a/lib/IR/Core.cpp
+++ b/lib/IR/Core.cpp
@@ -140,7 +140,16 @@ unsigned LLVMGetLastEnumAttributeKind(void) {
LLVMAttributeRef LLVMCreateEnumAttribute(LLVMContextRef C, unsigned KindID,
uint64_t Val) {
- return wrap(Attribute::get(*unwrap(C), (Attribute::AttrKind)KindID, Val));
+ auto &Ctx = *unwrap(C);
+ auto AttrKind = (Attribute::AttrKind)KindID;
+
+ if (AttrKind == Attribute::AttrKind::ByVal) {
+ // After r362128, byval attributes need to have a type attribute. Provide a
+ // NULL one until a proper API is added for this.
+ return wrap(Attribute::getWithByValType(Ctx, NULL));
+ } else {
+ return wrap(Attribute::get(Ctx, AttrKind, Val));
+ }
}
unsigned LLVMGetEnumAttributeKind(LLVMAttributeRef A) {
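A usage sketch for the C-API path touched here, assuming an existing LLVMContextRef Ctx; the kind lookup via LLVMGetEnumAttributeKindForName is the usual way to obtain a KindID:

    /* Requesting byval through the C API now yields a byval attribute
       carrying a NULL type, until a typed API is added. */
    unsigned KindID = LLVMGetEnumAttributeKindForName("byval", 5);
    LLVMAttributeRef A = LLVMCreateEnumAttribute(Ctx, KindID, 0);
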
diff --git a/lib/Target/AArch64/AArch64ISelLowering.cpp b/lib/Target/AArch64/AArch64ISelLowering.cpp
index 6c250aea39f0..03923878fd51 100644
--- a/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -10579,7 +10579,7 @@ static SDValue performPostLD1Combine(SDNode *N,
// are predecessors to each other or the Vector.
SmallPtrSet<const SDNode *, 32> Visited;
SmallVector<const SDNode *, 16> Worklist;
- Visited.insert(N);
+ Visited.insert(Addr.getNode());
Worklist.push_back(User);
Worklist.push_back(LD);
Worklist.push_back(Vector.getNode());
@@ -11995,6 +11995,14 @@ bool AArch64TargetLowering::isMaskAndCmp0FoldingBeneficial(
return Mask->getValue().isPowerOf2();
}
+bool AArch64TargetLowering::shouldExpandShift(SelectionDAG &DAG,
+ SDNode *N) const {
+ if (DAG.getMachineFunction().getFunction().hasMinSize() &&
+ !Subtarget->isTargetWindows())
+ return false;
+ return true;
+}
+
void AArch64TargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
// Update IsSplitCSR in AArch64FunctionInfo.
AArch64FunctionInfo *AFI = Entry->getParent()->getInfo<AArch64FunctionInfo>();
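Returning false from shouldExpandShift lets wide shifts become compact runtime calls at minimum size; the Windows carve-out exists because those compiler-rt shift helpers cannot be assumed to exist in the default Windows runtime. A sketch of an affected source pattern (illustrative only):

    // A 128-bit shift built at -Oz. On non-Windows AArch64 this may now stay
    // as a small library call; on Windows it is still expanded inline because
    // no shift helper is guaranteed to be available.
    __int128 shift_left(__int128 v, int n) { return v << n; }
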
diff --git a/lib/Target/AArch64/AArch64ISelLowering.h b/lib/Target/AArch64/AArch64ISelLowering.h
index 4421c31f65c9..86f313933d85 100644
--- a/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/lib/Target/AArch64/AArch64ISelLowering.h
@@ -480,11 +480,7 @@ public:
return VT.getSizeInBits() >= 64; // vector 'bic'
}
- bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override {
- if (DAG.getMachineFunction().getFunction().hasMinSize())
- return false;
- return true;
- }
+ bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override;
bool shouldTransformSignedTruncationCheck(EVT XVT,
unsigned KeptBits) const override {
diff --git a/lib/Target/AArch64/AArch64InstrInfo.cpp b/lib/Target/AArch64/AArch64InstrInfo.cpp
index 215e96a82d0e..fa9e5d808c4f 100644
--- a/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -32,6 +32,7 @@
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/GlobalValue.h"
+#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Casting.h"
@@ -1928,6 +1929,17 @@ bool AArch64InstrInfo::isCandidateToMergeOrPair(const MachineInstr &MI) const {
if (isLdStPairSuppressed(MI))
return false;
+ // Do not pair any callee-save store/reload instructions in the
+ // prologue/epilogue if the CFI information encoded the operations as separate
+ // instructions, as that would cause the size of the actual prologue to
+ // differ from the prologue size recorded in the Windows CFI.
+ const MCAsmInfo *MAI = MI.getMF()->getTarget().getMCAsmInfo();
+ bool NeedsWinCFI = MAI->usesWindowsCFI() &&
+ MI.getMF()->getFunction().needsUnwindTableEntry();
+ if (NeedsWinCFI && (MI.getFlag(MachineInstr::FrameSetup) ||
+ MI.getFlag(MachineInstr::FrameDestroy)))
+ return false;
+
// On some CPUs quad load/store pairs are slower than two single load/stores.
if (Subtarget.isPaired128Slow()) {
switch (MI.getOpcode()) {
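Sketch of why pairing is suppressed here: each callee-save store in a Windows CFI prologue corresponds to one recorded unwind operation, so merging two stores after the unwind info is fixed would make the real prologue shorter than the recorded one. Register numbers and offsets below are illustrative:

    str x19, [sp, #16]   // matches a recorded .seh_save_reg x19, 16
    str x20, [sp, #24]   // matches a recorded .seh_save_reg x20, 24
    // Folding these into one stp would desynchronize the prologue and its CFI.
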
diff --git a/lib/Target/Mips/AsmParser/MipsAsmParser.cpp b/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
index 1f7d095bf49b..aee434cb0067 100644
--- a/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
+++ b/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
@@ -3625,8 +3625,25 @@ void MipsAsmParser::expandMemInst(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
TOut.emitRRR(isGP64bit() ? Mips::DADDu : Mips::ADDu, TmpReg, TmpReg,
BaseReg, IDLoc, STI);
TOut.emitRRI(Inst.getOpcode(), DstReg, TmpReg, LoOffset, IDLoc, STI);
+ return;
+ }
+
+ assert(OffsetOp.isExpr() && "expected expression operand kind");
+ if (inPicMode()) {
+ // FIXME:
+ // a) Fix lw/sw $reg, symbol($reg) instruction expanding.
+ // b) If expression includes offset (sym + number), do not
+ // encode the offset into a relocation. Take it into account
+ // in the last load/store instruction.
+ // c) Check that immediates of R_MIPS_GOT16/R_MIPS_LO16 relocations
+ // do not exceed 16 bits.
+ // d) Use R_MIPS_GOT_PAGE/R_MIPS_GOT_OFST relocations instead
+ // of R_MIPS_GOT_DISP in appropriate cases to reduce number
+ // of GOT entries.
+ expandLoadAddress(TmpReg, Mips::NoRegister, OffsetOp, !ABI.ArePtrs64bit(),
+ IDLoc, Out, STI);
+ TOut.emitRRI(Inst.getOpcode(), DstReg, TmpReg, 0, IDLoc, STI);
} else {
- assert(OffsetOp.isExpr() && "expected expression operand kind");
const MCExpr *ExprOffset = OffsetOp.getExpr();
MCOperand LoOperand = MCOperand::createExpr(
MipsMCExpr::create(MipsMCExpr::MEK_LO, ExprOffset, getContext()));
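For reference, a rough sketch of the PIC expansion this adds for a load from a symbolic address; registers and relocations below are illustrative, and the FIXME list above records the known gaps:

    lw   $4, sym            # pseudo-form; in PIC mode expands to roughly:
    lw   $1, %got(sym)($gp) # load the symbol's address from the GOT
    lw   $4, 0($1)          # then perform the actual load
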
diff --git a/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index d0a3af375a6d..8439278b4ed5 100644
--- a/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -179,6 +179,9 @@ bool RISCVDAGToDAGISel::SelectInlineAsmMemoryOperand(
// operand and need no special handling.
OutOps.push_back(Op);
return false;
+ case InlineAsm::Constraint_A:
+ OutOps.push_back(Op);
+ return false;
default:
break;
}
diff --git a/lib/Target/RISCV/RISCVISelLowering.cpp b/lib/Target/RISCV/RISCVISelLowering.cpp
index e695f79f5cf4..2b0f64fa6db6 100644
--- a/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -2413,6 +2413,8 @@ RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
case 'J':
case 'K':
return C_Immediate;
+ case 'A':
+ return C_Memory;
}
}
return TargetLowering::getConstraintType(Constraint);
@@ -2442,6 +2444,21 @@ RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}
+unsigned
+RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
+ // Currently, only length-1 constraints are supported.
+ if (ConstraintCode.size() == 1) {
+ switch (ConstraintCode[0]) {
+ case 'A':
+ return InlineAsm::Constraint_A;
+ default:
+ break;
+ }
+ }
+
+ return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
+}
+
void RISCVTargetLowering::LowerAsmOperandForConstraint(
SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
SelectionDAG &DAG) const {
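For reference, the inline-asm shape this enables (a sketch; on RISC-V, 'A' is the GCC-style constraint for a memory operand whose address is held in a general-purpose register, commonly used for atomic sequences):

    // Hypothetical use of the new 'A' memory constraint:
    int load_reserved(int *p) {
      int v;
      __asm__ volatile("lr.w %0, %1" : "=r"(v) : "A"(*p));
      return v;
    }
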
diff --git a/lib/Target/RISCV/RISCVISelLowering.h b/lib/Target/RISCV/RISCVISelLowering.h
index f28c4753c1d9..e2059e70831d 100644
--- a/lib/Target/RISCV/RISCVISelLowering.h
+++ b/lib/Target/RISCV/RISCVISelLowering.h
@@ -93,6 +93,9 @@ public:
const char *getTargetNodeName(unsigned Opcode) const override;
ConstraintType getConstraintType(StringRef Constraint) const override;
+
+ unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override;
+
std::pair<unsigned, const TargetRegisterClass *>
getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
StringRef Constraint, MVT VT) const override;
diff --git a/lib/Target/TargetMachine.cpp b/lib/Target/TargetMachine.cpp
index 634866d93570..28d783afe56c 100644
--- a/lib/Target/TargetMachine.cpp
+++ b/lib/Target/TargetMachine.cpp
@@ -140,8 +140,8 @@ bool TargetMachine::shouldAssumeDSOLocal(const Module &M,
// don't assume the variables to be DSO local unless we actually know
// that for sure. This only has to be done for variables; for functions
// the linker can insert thunks for calling functions from another DLL.
- if (TT.isWindowsGNUEnvironment() && GV && GV->isDeclarationForLinker() &&
- isa<GlobalVariable>(GV))
+ if (TT.isWindowsGNUEnvironment() && TT.isOSBinFormatCOFF() && GV &&
+ GV->isDeclarationForLinker() && isa<GlobalVariable>(GV))
return false;
// On COFF, don't mark 'extern_weak' symbols as DSO local. If these symbols
@@ -154,7 +154,9 @@ bool TargetMachine::shouldAssumeDSOLocal(const Module &M,
// Make an exception for Windows OS in the triple: some firmware builds use
// *-win32-macho triples. This (accidentally?) produced Windows relocations
// without GOT tables in older clang versions; keep this behaviour.
- if (TT.isOSBinFormatCOFF() || (TT.isOSWindows() && TT.isOSBinFormatMachO()))
+ // Some JIT users use *-win32-elf triples; these shouldn't use GOT tables
+ // either.
+ if (TT.isOSBinFormatCOFF() || TT.isOSWindows())
return true;
// Most PIC code sequences that assume that a symbol is local cannot
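A small illustration of the triples involved (values are examples mirroring the comments above); both satisfy isOSWindows(), so both now take the early return and avoid GOT-based sequences:

    #include "llvm/ADT/Triple.h"
    llvm::Triple FirmwareTriple("x86_64-pc-win32-macho"); // firmware builds
    llvm::Triple JITTriple("x86_64-pc-win32-elf");        // some JIT users
    // Both report isOSWindows(), so shouldAssumeDSOLocal() returns true.
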
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index ad68ddbeaa8b..eecf34902ddc 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -5059,6 +5059,14 @@ bool X86TargetLowering::shouldFoldMaskToVariableShiftPair(SDValue Y) const {
return true;
}
+bool X86TargetLowering::shouldExpandShift(SelectionDAG &DAG,
+ SDNode *N) const {
+ if (DAG.getMachineFunction().getFunction().hasMinSize() &&
+ !Subtarget.isOSWindows())
+ return false;
+ return true;
+}
+
bool X86TargetLowering::shouldSplatInsEltVarIndex(EVT VT) const {
// Any legal vector type can be splatted more efficiently than
// loading/spilling from memory.
@@ -44096,7 +44104,8 @@ static SDValue combineScalarToVector(SDNode *N, SelectionDAG &DAG) {
// Simplify PMULDQ and PMULUDQ operations.
static SDValue combinePMULDQ(SDNode *N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI) {
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &Subtarget) {
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
@@ -44106,8 +44115,9 @@ static SDValue combinePMULDQ(SDNode *N, SelectionDAG &DAG,
return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), RHS, LHS);
// Multiply by zero.
+ // Don't return RHS as it may contain UNDEFs.
if (ISD::isBuildVectorAllZeros(RHS.getNode()))
- return RHS;
+ return getZeroVector(N->getSimpleValueType(0), Subtarget, DAG, SDLoc(N));
// Aggressively peek through ops to get at the demanded low bits.
APInt DemandedMask = APInt::getLowBitsSet(64, 32);
@@ -44315,7 +44325,7 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
case X86ISD::PCMPEQ:
case X86ISD::PCMPGT: return combineVectorCompare(N, DAG, Subtarget);
case X86ISD::PMULDQ:
- case X86ISD::PMULUDQ: return combinePMULDQ(N, DAG, DCI);
+ case X86ISD::PMULUDQ: return combinePMULDQ(N, DAG, DCI, Subtarget);
}
return SDValue();
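A concrete shape of the bug this fixes, sketched at source level: when the all-zeros operand of PMULUDQ carries an undef lane after folding, returning that operand would let the undef lane be refined to a nonzero value later, while the multiply's result must be zero in every lane; building a fresh zero vector avoids this.

    #include <immintrin.h>
    // Source-level shape of the pattern; the undef lane only arises during
    // IR/DAG folding and is not expressible directly here.
    __m128i mul_by_zero(__m128i x) {
      return _mm_mul_epu32(x, _mm_setzero_si128()); // PMULUDQ x, 0 -> 0
    }
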
diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index e0be03bc3f9d..db36bcb929e3 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -863,11 +863,7 @@ namespace llvm {
return VTIsOk(XVT) && VTIsOk(KeptBitsVT);
}
- bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override {
- if (DAG.getMachineFunction().getFunction().hasMinSize())
- return false;
- return true;
- }
+ bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override;
bool shouldSplatInsEltVarIndex(EVT VT) const override;
diff --git a/lib/Target/X86/X86Subtarget.cpp b/lib/Target/X86/X86Subtarget.cpp
index d5bb56603df9..61fc3d05b8b3 100644
--- a/lib/Target/X86/X86Subtarget.cpp
+++ b/lib/Target/X86/X86Subtarget.cpp
@@ -146,6 +146,9 @@ unsigned char X86Subtarget::classifyGlobalReference(const GlobalValue *GV,
return X86II::MO_DLLIMPORT;
return X86II::MO_COFFSTUB;
}
+ // Some JIT users use *-win32-elf triples; these shouldn't use GOT tables.
+ if (isOSWindows())
+ return X86II::MO_NO_FLAG;
if (is64Bit()) {
// ELF supports a large, truly PIC code model with non-PC relative GOT
diff --git a/lib/Transforms/Utils/LoopUnroll.cpp b/lib/Transforms/Utils/LoopUnroll.cpp
index e39ade523714..4a1edb3700c0 100644
--- a/lib/Transforms/Utils/LoopUnroll.cpp
+++ b/lib/Transforms/Utils/LoopUnroll.cpp
@@ -711,7 +711,7 @@ LoopUnrollResult llvm::UnrollLoop(Loop *L, UnrollLoopOptions ULO, LoopInfo *LI,
auto setDest = [LoopExit, ContinueOnTrue](BasicBlock *Src, BasicBlock *Dest,
ArrayRef<BasicBlock *> NextBlocks,
- BasicBlock *CurrentHeader,
+ BasicBlock *BlockInLoop,
bool NeedConditional) {
auto *Term = cast<BranchInst>(Src->getTerminator());
if (NeedConditional) {
@@ -723,7 +723,9 @@ LoopUnrollResult llvm::UnrollLoop(Loop *L, UnrollLoopOptions ULO, LoopInfo *LI,
if (Dest != LoopExit) {
BasicBlock *BB = Src;
for (BasicBlock *Succ : successors(BB)) {
- if (Succ == CurrentHeader)
+ // Preserve the incoming value from BB if we are jumping to the block
+ // in the current loop.
+ if (Succ == BlockInLoop)
continue;
for (PHINode &Phi : Succ->phis())
Phi.removeIncomingValue(BB, false);
@@ -794,7 +796,7 @@ LoopUnrollResult llvm::UnrollLoop(Loop *L, UnrollLoopOptions ULO, LoopInfo *LI,
// unconditional branch for some iterations.
NeedConditional = false;
- setDest(Headers[i], Dest, Headers, Headers[i], NeedConditional);
+ setDest(Headers[i], Dest, Headers, HeaderSucc[i], NeedConditional);
}
// Set up latches to branch to the new header in the unrolled iterations or