summaryrefslogtreecommitdiffstats
path: root/llvm/lib/Target
diff options
context:
space:
mode:
authorHans Wennborg <hans@hanshq.net>2016-05-18 16:10:17 +0000
committerHans Wennborg <hans@hanshq.net>2016-05-18 16:10:17 +0000
commit8eb336c14e4c033a3f81a3a43fe397f0879a2e68 (patch)
treee85fba6e6e6ed0c2528c0c565c6fea53085520d6 /llvm/lib/Target
parent9430b9113a54eafaa9bd7d7bb288c7a457d26404 (diff)
downloadbcm5719-llvm-8eb336c14e4c033a3f81a3a43fe397f0879a2e68.tar.gz
bcm5719-llvm-8eb336c14e4c033a3f81a3a43fe397f0879a2e68.zip
Re-commit r269828 "X86: Avoid using _chkstk when lowering WIN_ALLOCA instructions"
with an additional fix to make RegAllocFast ignore undef physreg uses. It would previously get confused about the "push %eax" instruction's use of eax. That method for adjusting the stack pointer is used in X86FrameLowering::emitSPUpdate as well, but since that runs after register-allocation, we didn't run into the RegAllocFast issue before. llvm-svn: 269949
Diffstat (limited to 'llvm/lib/Target')
-rw-r--r--llvm/lib/Target/X86/CMakeLists.txt1
-rw-r--r--llvm/lib/Target/X86/X86.h3
-rw-r--r--llvm/lib/Target/X86/X86ISelLowering.cpp23
-rw-r--r--llvm/lib/Target/X86/X86ISelLowering.h3
-rw-r--r--llvm/lib/Target/X86/X86InstrCompiler.td33
-rw-r--r--llvm/lib/Target/X86/X86InstrInfo.td6
-rw-r--r--llvm/lib/Target/X86/X86MachineFunctionInfo.h6
-rw-r--r--llvm/lib/Target/X86/X86TargetMachine.cpp1
-rw-r--r--llvm/lib/Target/X86/X86WinAllocaExpander.cpp294
9 files changed, 332 insertions, 38 deletions
diff --git a/llvm/lib/Target/X86/CMakeLists.txt b/llvm/lib/Target/X86/CMakeLists.txt
index e996f2ee838..401a8e9ce09 100644
--- a/llvm/lib/Target/X86/CMakeLists.txt
+++ b/llvm/lib/Target/X86/CMakeLists.txt
@@ -37,6 +37,7 @@ set(sources
X86WinEHState.cpp
X86OptimizeLEAs.cpp
X86FixupBWInsts.cpp
+ X86WinAllocaExpander.cpp
)
add_llvm_target(X86CodeGen ${sources})
diff --git a/llvm/lib/Target/X86/X86.h b/llvm/lib/Target/X86/X86.h
index c12554c9b92..27b428e2151 100644
--- a/llvm/lib/Target/X86/X86.h
+++ b/llvm/lib/Target/X86/X86.h
@@ -59,6 +59,9 @@ FunctionPass *createX86FixupLEAs();
/// recalculations.
FunctionPass *createX86OptimizeLEAs();
+/// Return a pass that expands WinAlloca pseudo-instructions.
+FunctionPass *createX86WinAllocaExpander();
+
/// Return a pass that optimizes the code-size of x86 call sequences. This is
/// done by replacing esp-relative movs with pushes.
FunctionPass *createX86CallFrameOptimization();
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 368e265f8e5..eb9fddf8970 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -16563,14 +16563,9 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
Result = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain,
DAG.getRegister(Vreg, SPTy));
} else {
- SDValue Flag;
- const unsigned Reg = (Subtarget.isTarget64BitLP64() ? X86::RAX : X86::EAX);
-
- Chain = DAG.getCopyToReg(Chain, dl, Reg, Size, Flag);
- Flag = Chain.getValue(1);
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
-
- Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Flag);
+ Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Size);
+ MF.getInfo<X86MachineFunctionInfo>()->setHasWinAlloca(true);
const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
unsigned SPReg = RegInfo->getStackRegister();
@@ -23229,18 +23224,6 @@ X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI,
}
MachineBasicBlock *
-X86TargetLowering::EmitLoweredWinAlloca(MachineInstr *MI,
- MachineBasicBlock *BB) const {
- assert(!Subtarget.isTargetMachO());
- DebugLoc DL = MI->getDebugLoc();
- MachineInstr *ResumeMI = Subtarget.getFrameLowering()->emitStackProbe(
- *BB->getParent(), *BB, MI, DL, false);
- MachineBasicBlock *ResumeBB = ResumeMI->getParent();
- MI->eraseFromParent(); // The pseudo instruction is gone now.
- return ResumeBB;
-}
-
-MachineBasicBlock *
X86TargetLowering::EmitLoweredCatchRet(MachineInstr *MI,
MachineBasicBlock *BB) const {
MachineFunction *MF = BB->getParent();
@@ -23702,8 +23685,6 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case X86::TLS_base_addr32:
case X86::TLS_base_addr64:
return EmitLoweredTLSAddr(MI, BB);
- case X86::WIN_ALLOCA:
- return EmitLoweredWinAlloca(MI, BB);
case X86::CATCHRET:
return EmitLoweredCatchRet(MI, BB);
case X86::CATCHPAD:
diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
index 52c4553082d..4cf35d2436b 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -1162,9 +1162,6 @@ namespace llvm {
MachineBasicBlock *EmitLoweredAtomicFP(MachineInstr *I,
MachineBasicBlock *BB) const;
- MachineBasicBlock *EmitLoweredWinAlloca(MachineInstr *MI,
- MachineBasicBlock *BB) const;
-
MachineBasicBlock *EmitLoweredCatchRet(MachineInstr *MI,
MachineBasicBlock *BB) const;
diff --git a/llvm/lib/Target/X86/X86InstrCompiler.td b/llvm/lib/Target/X86/X86InstrCompiler.td
index 5efa7c74fab..66326eaf50b 100644
--- a/llvm/lib/Target/X86/X86InstrCompiler.td
+++ b/llvm/lib/Target/X86/X86InstrCompiler.td
@@ -99,18 +99,6 @@ def VAARG_64 : I<0, Pseudo,
(X86vaarg64 addr:$ap, imm:$size, imm:$mode, imm:$align)),
(implicit EFLAGS)]>;
-// Dynamic stack allocation yields a _chkstk or _alloca call for all Windows
-// targets. These calls are needed to probe the stack when allocating more than
-// 4k bytes in one go. Touching the stack at 4K increments is necessary to
-// ensure that the guard pages used by the OS virtual memory manager are
-// allocated in correct sequence.
-// The main point of having separate instruction are extra unmodelled effects
-// (compared to ordinary calls) like stack pointer change.
-
-let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
- def WIN_ALLOCA : I<0, Pseudo, (outs), (ins),
- "# dynamic stack allocation",
- [(X86WinAlloca)]>;
// When using segmented stacks these are lowered into instructions which first
// check if the current stacklet has enough free memory. If it does, memory is
@@ -132,6 +120,27 @@ def SEG_ALLOCA_64 : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$size),
Requires<[In64BitMode]>;
}
+// Dynamic stack allocation yields a _chkstk or _alloca call for all Windows
+// targets. These calls are needed to probe the stack when allocating more than
+// 4k bytes in one go. Touching the stack at 4K increments is necessary to
+// ensure that the guard pages used by the OS virtual memory manager are
+// allocated in correct sequence.
+// The main point of having a separate instruction is the extra unmodelled effects
+// (compared to ordinary calls) like stack pointer change.
+
+let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
+def WIN_ALLOCA_32 : I<0, Pseudo, (outs), (ins GR32:$size),
+ "# dynamic stack allocation",
+ [(X86WinAlloca GR32:$size)]>,
+ Requires<[NotLP64]>;
+
+let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
+def WIN_ALLOCA_64 : I<0, Pseudo, (outs), (ins GR64:$size),
+ "# dynamic stack allocation",
+ [(X86WinAlloca GR64:$size)]>,
+ Requires<[In64BitMode]>;
+
+
//===----------------------------------------------------------------------===//
// EH Pseudo Instructions
//
diff --git a/llvm/lib/Target/X86/X86InstrInfo.td b/llvm/lib/Target/X86/X86InstrInfo.td
index dcd3f5395ab..18c74555a93 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.td
+++ b/llvm/lib/Target/X86/X86InstrInfo.td
@@ -112,6 +112,8 @@ def SDT_X86TLSBASEADDR : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
def SDT_X86TLSCALL : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
+def SDT_X86WIN_ALLOCA : SDTypeProfile<0, 1, [SDTCisVT<0, iPTR>]>;
+
def SDT_X86SEG_ALLOCA : SDTypeProfile<1, 1, [SDTCisVT<0, iPTR>, SDTCisVT<1, iPTR>]>;
def SDT_X86EHRET : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
@@ -273,8 +275,8 @@ def X86bextr : SDNode<"X86ISD::BEXTR", SDTIntBinOp>;
def X86mul_imm : SDNode<"X86ISD::MUL_IMM", SDTIntBinOp>;
-def X86WinAlloca : SDNode<"X86ISD::WIN_ALLOCA", SDTX86Void,
- [SDNPHasChain, SDNPInGlue, SDNPOutGlue]>;
+def X86WinAlloca : SDNode<"X86ISD::WIN_ALLOCA", SDT_X86WIN_ALLOCA,
+ [SDNPHasChain, SDNPOutGlue]>;
def X86SegAlloca : SDNode<"X86ISD::SEG_ALLOCA", SDT_X86SEG_ALLOCA,
[SDNPHasChain]>;
diff --git a/llvm/lib/Target/X86/X86MachineFunctionInfo.h b/llvm/lib/Target/X86/X86MachineFunctionInfo.h
index 4632adaf1e7..d517d82537a 100644
--- a/llvm/lib/Target/X86/X86MachineFunctionInfo.h
+++ b/llvm/lib/Target/X86/X86MachineFunctionInfo.h
@@ -98,6 +98,9 @@ class X86MachineFunctionInfo : public MachineFunctionInfo {
/// True if this function uses the red zone.
bool UsesRedZone = false;
+ /// True if this function has WIN_ALLOCA instructions.
+ bool HasWinAlloca = false;
+
private:
/// ForwardedMustTailRegParms - A list of virtual and physical registers
/// that must be forwarded to every musttail call.
@@ -172,6 +175,9 @@ public:
bool getUsesRedZone() const { return UsesRedZone; }
void setUsesRedZone(bool V) { UsesRedZone = V; }
+
+ bool hasWinAlloca() const { return HasWinAlloca; }
+ void setHasWinAlloca(bool v) { HasWinAlloca = v; }
};
} // End llvm namespace
diff --git a/llvm/lib/Target/X86/X86TargetMachine.cpp b/llvm/lib/Target/X86/X86TargetMachine.cpp
index cde3197c58c..1fe98a66c7c 100644
--- a/llvm/lib/Target/X86/X86TargetMachine.cpp
+++ b/llvm/lib/Target/X86/X86TargetMachine.cpp
@@ -266,6 +266,7 @@ void X86PassConfig::addPreRegAlloc() {
addPass(createX86OptimizeLEAs());
addPass(createX86CallFrameOptimization());
+ addPass(createX86WinAllocaExpander());
}
void X86PassConfig::addPostRegAlloc() {
diff --git a/llvm/lib/Target/X86/X86WinAllocaExpander.cpp b/llvm/lib/Target/X86/X86WinAllocaExpander.cpp
new file mode 100644
index 00000000000..cc82074e685
--- /dev/null
+++ b/llvm/lib/Target/X86/X86WinAllocaExpander.cpp
@@ -0,0 +1,294 @@
+//===----- X86WinAllocaExpander.cpp - Expand WinAlloca pseudo instruction -===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a pass that expands WinAlloca pseudo-instructions.
+//
+// It performs a conservative analysis to determine whether each allocation
+// falls within a region of the stack that is safe to use, or whether stack
+// probes must be emitted.
+//
+//===----------------------------------------------------------------------===//
+
+#include "X86.h"
+#include "X86InstrBuilder.h"
+#include "X86InstrInfo.h"
+#include "X86MachineFunctionInfo.h"
+#include "X86Subtarget.h"
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetInstrInfo.h"
+
+using namespace llvm;
+
+namespace {
+
/// Machine-function pass that replaces WIN_ALLOCA_32/WIN_ALLOCA_64 pseudo
/// instructions (emitted for dynamic stack allocations on Windows targets)
/// with either a plain stack-pointer subtraction, a touch-then-subtract
/// sequence, or a full stack-probe call, based on a conservative estimate of
/// how far the stack has already been touched.
class X86WinAllocaExpander : public MachineFunctionPass {
public:
  X86WinAllocaExpander() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

private:
  /// Strategies for lowering a WinAlloca:
  ///  - TouchAndSub: push once to touch the stack tip, then subtract the rest;
  ///  - Sub: plain SP subtraction, no probing needed;
  ///  - Probe: emit the full stack-probe sequence (amount unknown or larger
  ///    than the probe size).
  enum Lowering { TouchAndSub, Sub, Probe };

  /// Deterministic-order map from WinAlloca instruction to desired lowering.
  typedef MapVector<MachineInstr*, Lowering> LoweringMap;

  /// Compute which lowering to use for each WinAlloca instruction.
  void computeLowerings(MachineFunction &MF, LoweringMap& Lowerings);

  /// Get the appropriate lowering based on current offset and amount.
  Lowering getLowering(int64_t CurrentOffset, int64_t AllocaAmount);

  /// Lower a WinAlloca instruction.
  void lower(MachineInstr* MI, Lowering L);

  // Cached per-function state, initialized in runOnMachineFunction.
  MachineRegisterInfo *MRI;
  const X86Subtarget *STI;
  const TargetInstrInfo *TII;
  const X86RegisterInfo *TRI;
  unsigned StackPtr;        // Physical stack-pointer register (ESP/RSP).
  unsigned SlotSize;        // Stack slot size in bytes (4 or 8).
  int64_t StackProbeSize;   // Guard-page stride; default 4096, attribute-overridable.

  const char *getPassName() const override { return "X86 WinAlloca Expander"; }
  static char ID;
};
+
+char X86WinAllocaExpander::ID = 0;
+
+} // end anonymous namespace
+
/// Factory for the WinAlloca expander pass; scheduled by X86PassConfig
/// before register allocation.
FunctionPass *llvm::createX86WinAllocaExpander() {
  return new X86WinAllocaExpander();
}
+
+/// Return the allocation amount for a WinAlloca instruction, or -1 if unknown.
+static int64_t getWinAllocaAmount(MachineInstr *MI, MachineRegisterInfo *MRI) {
+ assert(MI->getOpcode() == X86::WIN_ALLOCA_32 ||
+ MI->getOpcode() == X86::WIN_ALLOCA_64);
+ assert(MI->getOperand(0).isReg());
+
+ unsigned AmountReg = MI->getOperand(0).getReg();
+ MachineInstr *Def = MRI->getUniqueVRegDef(AmountReg);
+
+ // Look through copies.
+ while (Def && Def->isCopy() && Def->getOperand(1).isReg())
+ Def = MRI->getUniqueVRegDef(Def->getOperand(1).getReg());
+
+ if (!Def ||
+ (Def->getOpcode() != X86::MOV32ri && Def->getOpcode() != X86::MOV64ri) ||
+ !Def->getOperand(1).isImm())
+ return -1;
+
+ return Def->getOperand(1).getImm();
+}
+
+X86WinAllocaExpander::Lowering
+X86WinAllocaExpander::getLowering(int64_t CurrentOffset,
+ int64_t AllocaAmount) {
+ // For a non-constant amount or a large amount, we have to probe.
+ if (AllocaAmount < 0 || AllocaAmount > StackProbeSize)
+ return Probe;
+
+ // If it fits within the safe region of the stack, just subtract.
+ if (CurrentOffset + AllocaAmount <= StackProbeSize)
+ return Sub;
+
+ // Otherwise, touch the current tip of the stack, then subtract.
+ return TouchAndSub;
+}
+
+static bool isPushPop(const MachineInstr &MI) {
+ switch (MI.getOpcode()) {
+ case X86::PUSH32i8:
+ case X86::PUSH32r:
+ case X86::PUSH32rmm:
+ case X86::PUSH32rmr:
+ case X86::PUSHi32:
+ case X86::PUSH64i8:
+ case X86::PUSH64r:
+ case X86::PUSH64rmm:
+ case X86::PUSH64rmr:
+ case X86::PUSH64i32:
+ case X86::POP32r:
+ case X86::POP64r:
+ return true;
+ default:
+ return false;
+ }
+}
+
void X86WinAllocaExpander::computeLowerings(MachineFunction &MF,
                                            LoweringMap &Lowerings) {
  // Do a one-pass reverse post-order walk of the CFG to conservatively estimate
  // the offset between the stack pointer and the lowest touched part of the
  // stack, and use that to decide how to lower each WinAlloca instruction.
  //
  // Invariant maintained below: Offset is the distance (in bytes) from the
  // current SP up to the lowest byte known to have been touched. Touching the
  // tip (call/push/pop/probe) resets it to 0; moving SP down without touching
  // (Sub, ADJCALLSTACKDOWN) increases it.

  // Initialize OutOffset[B], the stack offset at exit from B, to something big.
  DenseMap<MachineBasicBlock *, int64_t> OutOffset;
  for (MachineBasicBlock &MBB : MF)
    OutOffset[&MBB] = INT32_MAX;

  // Note: we don't know the offset at the start of the entry block since the
  // prologue hasn't been inserted yet, and how much that will adjust the stack
  // pointer depends on register spills, which have not been computed yet.

  // Compute the reverse post-order.
  ReversePostOrderTraversal<MachineFunction*> RPO(&MF);

  for (MachineBasicBlock *MBB : RPO) {
    // Take the worst (largest) offset over all predecessors; -1 is a sentinel
    // for "no predecessor offset seen yet" (e.g. the entry block).
    int64_t Offset = -1;
    for (MachineBasicBlock *Pred : MBB->predecessors())
      Offset = std::max(Offset, OutOffset[Pred]);
    if (Offset == -1) Offset = INT32_MAX;

    for (MachineInstr &MI : *MBB) {
      if (MI.getOpcode() == X86::WIN_ALLOCA_32 ||
          MI.getOpcode() == X86::WIN_ALLOCA_64) {
        // A WinAlloca moves StackPtr, and potentially touches it.
        int64_t Amount = getWinAllocaAmount(&MI, MRI);
        Lowering L = getLowering(Offset, Amount);
        Lowerings[&MI] = L;
        switch (L) {
        case Sub:
          // SP moves down by Amount without touching the stack.
          Offset += Amount;
          break;
        case TouchAndSub:
          // Touch at the old tip, then SP moves down by Amount.
          Offset = Amount;
          break;
        case Probe:
          // The probe touches all the way down to the new SP.
          Offset = 0;
          break;
        }
      } else if (MI.isCall() || isPushPop(MI)) {
        // Calls, pushes and pops touch the tip of the stack.
        Offset = 0;
      } else if (MI.getOpcode() == X86::ADJCALLSTACKUP32 ||
                 MI.getOpcode() == X86::ADJCALLSTACKUP64) {
        // Call-frame deallocation: SP moves back up towards the touched area.
        Offset -= MI.getOperand(0).getImm();
      } else if (MI.getOpcode() == X86::ADJCALLSTACKDOWN32 ||
                 MI.getOpcode() == X86::ADJCALLSTACKDOWN64) {
        // Call-frame allocation: SP moves down without touching.
        Offset += MI.getOperand(0).getImm();
      } else if (MI.modifiesRegister(StackPtr, TRI)) {
        // Any other modification of SP means we've lost track of it.
        Offset = INT32_MAX;
      }
    }

    OutOffset[MBB] = Offset;
  }
}
+
+static unsigned getSubOpcode(bool Is64Bit, int64_t Amount) {
+ if (Is64Bit)
+ return isInt<8>(Amount) ? X86::SUB64ri8 : X86::SUB64ri32;
+ return isInt<8>(Amount) ? X86::SUB32ri8 : X86::SUB32ri;
+}
+
+void X86WinAllocaExpander::lower(MachineInstr* MI, Lowering L) {
+ DebugLoc DL = MI->getDebugLoc();
+ MachineBasicBlock *MBB = MI->getParent();
+ MachineBasicBlock::iterator I = *MI;
+
+ int64_t Amount = getWinAllocaAmount(MI, MRI);
+ if (Amount == 0) {
+ MI->eraseFromParent();
+ return;
+ }
+
+ bool Is64Bit = STI->is64Bit();
+ assert(SlotSize == 4 || SlotSize == 8);
+ unsigned RegA = (SlotSize == 8) ? X86::RAX : X86::EAX;
+
+ switch (L) {
+ case TouchAndSub:
+ assert(Amount >= SlotSize);
+
+ // Use a push to touch the top of the stack.
+ BuildMI(*MBB, I, DL, TII->get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
+ .addReg(RegA, RegState::Undef);
+ Amount -= SlotSize;
+ if (!Amount)
+ break;
+
+ // Fall through to make any remaining adjustment.
+ case Sub:
+ assert(Amount > 0);
+ if (Amount == SlotSize) {
+ // Use push to save size.
+ BuildMI(*MBB, I, DL, TII->get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
+ .addReg(RegA, RegState::Undef);
+ } else {
+ // Sub.
+ BuildMI(*MBB, I, DL, TII->get(getSubOpcode(Is64Bit, Amount)), StackPtr)
+ .addReg(StackPtr)
+ .addImm(Amount);
+ }
+ break;
+ case Probe:
+ // The probe lowering expects the amount in RAX/EAX.
+ BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::COPY), RegA)
+ .addReg(MI->getOperand(0).getReg());
+
+ // Do the probe.
+ STI->getFrameLowering()->emitStackProbe(*MBB->getParent(), *MBB, MI, DL,
+ /*InPrologue=*/false);
+ break;
+ }
+
+ unsigned AmountReg = MI->getOperand(0).getReg();
+ MI->eraseFromParent();
+
+ // Delete the definition of AmountReg, possibly walking a chain of copies.
+ for (;;) {
+ if (!MRI->use_empty(AmountReg))
+ break;
+ MachineInstr *AmountDef = MRI->getUniqueVRegDef(AmountReg);
+ if (!AmountDef)
+ break;
+ if (AmountDef->isCopy() && AmountDef->getOperand(1).isReg())
+ AmountReg = AmountDef->getOperand(1).isReg();
+ AmountDef->eraseFromParent();
+ break;
+ }
+}
+
+bool X86WinAllocaExpander::runOnMachineFunction(MachineFunction &MF) {
+ if (!MF.getInfo<X86MachineFunctionInfo>()->hasWinAlloca())
+ return false;
+
+ MRI = &MF.getRegInfo();
+ STI = &MF.getSubtarget<X86Subtarget>();
+ TII = STI->getInstrInfo();
+ TRI = STI->getRegisterInfo();
+ StackPtr = TRI->getStackRegister();
+ SlotSize = TRI->getSlotSize();
+
+ StackProbeSize = 4096;
+ if (MF.getFunction()->hasFnAttribute("stack-probe-size")) {
+ MF.getFunction()
+ ->getFnAttribute("stack-probe-size")
+ .getValueAsString()
+ .getAsInteger(0, StackProbeSize);
+ }
+
+ LoweringMap Lowerings;
+ computeLowerings(MF, Lowerings);
+ for (auto &P : Lowerings)
+ lower(P.first, P.second);
+
+ return true;
+}
OpenPOWER on IntegriCloud