author    Dan Gohman <gohman@apple.com>  2009-02-22 18:06:32 +0000
committer Dan Gohman <gohman@apple.com>  2009-02-22 18:06:32 +0000
commit    648c5e9c9928c55a89eb4a71f20cb065686e07ed (patch)
tree      4fb06e91f44b74a9955cf470965afb22f49e33c7
parent    f394e58af589d6c198f4c8eca8f83110d8529b5b (diff)
Revert the part of r64623 that attempted to align the source in a memcpy
to match the alignment of the destination. It isn't necessary in order to
have loads and stores handled like the SSE loadu/storeu intrinsics, and it
was causing a performance regression in MultiSource/Applications/JM/lencod.

The problem appears to have been a memcpy that copies from some highly
aligned array into an alloca; the alloca was then being assigned a large
alignment, which required codegen to perform dynamic stack-pointer
re-alignment, which forced the enclosing function to have a frame pointer,
which led to increased spilling.

llvm-svn: 65289
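For context, a minimal, hypothetical C++ sketch of the shape of code the
message describes: a memcpy whose source is a highly aligned array and whose
destination is a local buffer that becomes an alloca at the IR level. The
names (Table, SumCopy) and the 64-byte alignment are illustrative assumptions,
not taken from lencod.

```cpp
// Hypothetical reproduction of the pattern described in the commit message.
// Per the message, the alloca ended up being assigned a large alignment,
// which forced dynamic stack realignment and a frame pointer in the
// enclosing function, leading to increased spilling.
#include <cstring>

alignas(64) static const int Table[64] = {1, 2, 3};  // highly aligned source

int SumCopy() {
  int Local[64];                             // lowered to an alloca in LLVM IR
  std::memcpy(Local, Table, sizeof(Local));  // copy from aligned array to stack
  int Sum = 0;
  for (int I = 0; I != 64; ++I)
    Sum += Local[I];
  return Sum;
}
```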
 llvm/lib/Transforms/Scalar/InstructionCombining.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/Transforms/Scalar/InstructionCombining.cpp b/llvm/lib/Transforms/Scalar/InstructionCombining.cpp
index cfd1bac7ba8..2b18640f41a 100644
--- a/llvm/lib/Transforms/Scalar/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/Scalar/InstructionCombining.cpp
@@ -9286,7 +9286,7 @@ unsigned InstCombiner::GetOrEnforceKnownAlignment(Value *V,
 Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
   unsigned DstAlign = GetOrEnforceKnownAlignment(MI->getOperand(1));
-  unsigned SrcAlign = GetOrEnforceKnownAlignment(MI->getOperand(2), DstAlign);
+  unsigned SrcAlign = GetOrEnforceKnownAlignment(MI->getOperand(2));
   unsigned MinAlign = std::min(DstAlign, SrcAlign);
   unsigned CopyAlign = MI->getAlignment()->getZExtValue();