Make untracked lclVars contained
Make untracked lclVars contained where possible.
CarolEidt committed May 25, 2017
1 parent a125a6e commit 205136a
Showing 6 changed files with 60 additions and 40 deletions.
9 changes: 2 additions & 7 deletions src/jit/codegenxarch.cpp
@@ -714,12 +714,6 @@ void CodeGen::genCodeForDivMod(GenTreeOp* treeNode)
genConsumeOperands(treeNode->AsOp());
if (varTypeIsFloating(targetType))
{
- // Check that divisor is a valid operand.
- // Note that a reg optional operand is treated as a memory op
- // if no register is allocated to it.
- assert(divisor->isUsedFromReg() || divisor->isMemoryOp() || divisor->IsCnsFltOrDbl() ||
-        divisor->IsRegOptional());
-
// Floating point div/rem operation
assert(oper == GT_DIV || oper == GT_MOD);

@@ -829,7 +823,8 @@ void CodeGen::genCodeForBinary(GenTree* treeNode)
if (!op1->isUsedFromReg())
{
assert(treeNode->OperIsCommutative());
- assert(op1->isMemoryOp() || op1->IsCnsNonZeroFltOrDbl() || op1->IsIntCnsFitsInI32() || op1->IsRegOptional());
+ assert(op1->isMemoryOp() || op1->IsLocal() || op1->IsCnsNonZeroFltOrDbl() || op1->IsIntCnsFitsInI32() ||
+        op1->IsRegOptional());

op1 = treeNode->gtGetOp2();
op2 = treeNode->gtGetOp1();
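For context: a "contained" operand is one whose memory access is folded directly into the consuming instruction instead of being loaded into a register first. A minimal sketch of what containment buys, assuming a float divide whose divisor lives on the stack frame; the instruction text below is invented for illustration, not emitted by this commit:

```cpp
#include <cstdio>

void EmitFloatDiv(bool divisorContained)
{
    if (divisorContained)
    {
        // The divide reads its source operand straight from memory.
        std::puts("divss xmm0, dword ptr [rbp-0x10]");
    }
    else
    {
        // Otherwise the divisor must first be loaded into a register.
        std::puts("movss xmm1, dword ptr [rbp-0x10]");
        std::puts("divss xmm0, xmm1");
    }
}

int main()
{
    EmitFloatDiv(true);  // the path untracked locals can now take
    EmitFloatDiv(false);
    return 0;
}
```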
4 changes: 2 additions & 2 deletions src/jit/emitxarch.cpp
@@ -2878,12 +2878,12 @@ regNumber emitter::emitInsBinary(instruction ins, emitAttr attr, GenTree* dst, GenTree* src)
GenTreeLclVar* lclVar = nullptr;
if (src->isLclVarUsedFromMemory())
{
- assert(src->IsRegOptional());
+ assert(src->IsRegOptional() || !emitComp->lvaTable[src->gtLclVar.gtLclNum].lvIsRegCandidate());
lclVar = src->AsLclVar();
}
if (dst->isLclVarUsedFromMemory())
{
- assert(dst->IsRegOptional());
+ assert(dst->IsRegOptional() || !emitComp->lvaTable[dst->gtLclVar.gtLclNum].lvIsRegCandidate());
lclVar = dst->AsLclVar();
}

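The relaxed asserts above encode a simple invariant: a lclVar used directly from memory must either be reg-optional (the allocator declined to assign it a register) or must never have been a register candidate at all, as is the case for untracked locals. A hedged restatement with simplified stand-in types (the real code uses GenTree and LclVarDsc with an lvIsRegCandidate() method):

```cpp
// Stand-in for the JIT's LclVarDsc; simplified for illustration.
struct LclVarDsc
{
    bool lvIsRegCandidate; // was this local ever a candidate for a register?
};

// The invariant checked by the updated asserts: a local read from memory is
// legal when it is reg-optional, or when it was never a register candidate
// (an untracked local always lives on the frame).
bool IsLegalMemoryUseOfLclVar(bool isRegOptional, const LclVarDsc& varDsc)
{
    return isRegOptional || !varDsc.lvIsRegCandidate;
}
```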
26 changes: 23 additions & 3 deletions src/jit/lower.cpp
@@ -103,6 +103,26 @@ bool Lowering::IsSafeToContainMem(GenTree* parentNode, GenTree* childNode)
return true;
}

+ //------------------------------------------------------------------------
+ // IsContainableMemoryOp: Checks whether this is a memory op that can be contained.
+ //
+ // Arguments:
+ // node - the node of interest.
+ //
+ // Notes:
+ // This differs from the isMemoryOp() method on GenTree because it checks for
+ // the case of an untracked local. Note that this won't include locals that
+ // for some reason do not become register candidates, nor those that get
+ // spilled.
+ //
+ // Return value:
+ // True if this will definitely be a memory reference that could be contained.
+ //
+ bool Lowering::IsContainableMemoryOp(GenTree* node)
+ {
+     return node->isMemoryOp() || (node->IsLocal() && !comp->lvaTable[node->AsLclVar()->gtLclNum].lvTracked);
+ }
+
//------------------------------------------------------------------------

// This is the main entry point for Lowering.
@@ -2427,7 +2447,7 @@ void Lowering::LowerCompare(GenTree* cmp)
GenTreeIntCon* op2 = cmp->gtGetOp2()->AsIntCon();
ssize_t op2Value = op2->IconValue();

- if (op1->isMemoryOp() && varTypeIsSmall(op1Type) && genTypeCanRepresentValue(op1Type, op2Value))
+ if (IsContainableMemoryOp(op1) && varTypeIsSmall(op1Type) && genTypeCanRepresentValue(op1Type, op2Value))
{
//
// If op1's type is small then try to narrow op2 so it has the same type as op1.
@@ -2457,7 +2477,7 @@ void Lowering::LowerCompare(GenTree* cmp)
// the result of bool returning calls.
//

- if (castOp->OperIs(GT_CALL, GT_LCL_VAR) || castOp->OperIsLogical() || castOp->isMemoryOp())
+ if (castOp->OperIs(GT_CALL, GT_LCL_VAR) || castOp->OperIsLogical() || IsContainableMemoryOp(castOp))
{
assert(!castOp->gtOverflowEx()); // Must not be an overflow checking operation

@@ -2502,7 +2522,7 @@ void Lowering::LowerCompare(GenTree* cmp)
cmp->gtOp.gtOp1 = andOp1;
cmp->gtOp.gtOp2 = andOp2;

- if (andOp1->isMemoryOp() && andOp2->IsIntegralConst())
+ if (IsContainableMemoryOp(andOp1) && andOp2->IsIntegralConst())
{
//
// For "test" we only care about the bits that are set in the second operand (mask).
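A self-contained restatement of the new helper, with simplified types in place of the real GenTree/lvaTable machinery, to highlight the key idea: an untracked local never gets a register, so a use of it is guaranteed to be a stack access. All names below are illustrative stand-ins:

```cpp
#include <vector>

// Simplified stand-ins for the JIT's real data structures.
struct LclVarDsc
{
    bool lvTracked; // tracked locals participate in liveness and register allocation
};

struct Node
{
    bool     isMemoryOp; // an indirection, GT_LCL_FLD, etc.
    bool     isLclVar;   // a GT_LCL_VAR use
    unsigned lclNum;     // index into the local variable table, when isLclVar
};

// Mirrors the shape of Lowering::IsContainableMemoryOp: plain memory ops
// qualify, and so does a use of an untracked local, because such a local
// always lives on the stack frame.
bool IsContainableMemoryOp(const Node& node, const std::vector<LclVarDsc>& lvaTable)
{
    return node.isMemoryOp || (node.isLclVar && !lvaTable[node.lclNum].lvTracked);
}
```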
3 changes: 3 additions & 0 deletions src/jit/lower.h
@@ -278,6 +278,9 @@ class Lowering : public Phase
// for example small enough and non-relocatable
bool IsContainableImmed(GenTree* parentNode, GenTree* childNode);

+ // Return true if 'node' is a containable memory op.
+ bool IsContainableMemoryOp(GenTree* node);
+
// Makes 'childNode' contained in the 'parentNode'
void MakeSrcContained(GenTreePtr parentNode, GenTreePtr childNode);

46 changes: 23 additions & 23 deletions src/jit/lsraxarch.cpp
@@ -400,12 +400,12 @@ void Lowering::TreeNodeInfoInit(GenTree* tree)
info->srcCount = 2;
info->dstCount = 1;

- if (op2->isMemoryOp() || op2->IsCnsNonZeroFltOrDbl())
+ if (IsContainableMemoryOp(op2) || op2->IsCnsNonZeroFltOrDbl())
{
MakeSrcContained(tree, op2);
}
else if (tree->OperIsCommutative() &&
- (op1->IsCnsNonZeroFltOrDbl() || (op1->isMemoryOp() && IsSafeToContainMem(tree, op1))))
+ (op1->IsCnsNonZeroFltOrDbl() || (IsContainableMemoryOp(op1) && IsSafeToContainMem(tree, op1))))
{
// Though we have GT_ADD(op1=memOp, op2=non-memOp), we try to reorder the operands
// as long as it is safe so that the following efficient code sequence is generated:
@@ -629,7 +629,7 @@ void Lowering::TreeNodeInfoInit(GenTree* tree)
{
other = node->gtIndex;
}
- else if (node->gtIndex->isMemoryOp())
+ else if (IsContainableMemoryOp(node->gtIndex))
{
other = node->gtIndex;
}
@@ -640,7 +640,7 @@ void Lowering::TreeNodeInfoInit(GenTree* tree)

if (node->gtIndex->TypeGet() == node->gtArrLen->TypeGet())
{
- if (other->isMemoryOp())
+ if (IsContainableMemoryOp(other))
{
MakeSrcContained(tree, other);
}
@@ -845,7 +845,7 @@ void Lowering::TreeNodeInfoInit(GenTree* tree)
delayUseSrc = op1;
}
else if ((op2 != nullptr) &&
- (!tree->OperIsCommutative() || (op2->isMemoryOp() && (op2->gtLsraInfo.srcCount == 0))))
+ (!tree->OperIsCommutative() || (IsContainableMemoryOp(op2) && (op2->gtLsraInfo.srcCount == 0))))
{
delayUseSrc = op2;
}
@@ -2110,15 +2110,15 @@ void Lowering::TreeNodeInfoInitLogicalOp(GenTree* tree)
binOpInRMW = IsBinOpInRMWStoreInd(tree);
if (!binOpInRMW)
{
- if (op2->isMemoryOp() && tree->TypeGet() == op2->TypeGet())
+ if (IsContainableMemoryOp(op2) && tree->TypeGet() == op2->TypeGet())
{
directlyEncodable = true;
operand = op2;
}
else if (tree->OperIsCommutative())
{
if (IsContainableImmed(tree, op1) ||
- (op1->isMemoryOp() && tree->TypeGet() == op1->TypeGet() && IsSafeToContainMem(tree, op1)))
+ (IsContainableMemoryOp(op1) && tree->TypeGet() == op1->TypeGet() && IsSafeToContainMem(tree, op1)))
{
// If it is safe, we can reverse the order of operands of commutative operations for efficient
// codegen
@@ -2176,7 +2176,7 @@ void Lowering::TreeNodeInfoInitModDiv(GenTree* tree)
// everything is made explicit by adding casts.
assert(op1->TypeGet() == op2->TypeGet());

- if (op2->isMemoryOp() || op2->IsCnsNonZeroFltOrDbl())
+ if (IsContainableMemoryOp(op2) || op2->IsCnsNonZeroFltOrDbl())
{
MakeSrcContained(tree, op2);
}
@@ -2241,7 +2241,7 @@
}

// divisor can be an r/m, but the memory indirection must be of the same size as the divide
- if (op2->isMemoryOp() && (op2->TypeGet() == tree->TypeGet()))
+ if (IsContainableMemoryOp(op2) && (op2->TypeGet() == tree->TypeGet()))
{
MakeSrcContained(tree, op2);
}
@@ -2280,7 +2280,7 @@ void Lowering::TreeNodeInfoInitIntrinsic(GenTree* tree)
switch (tree->gtIntrinsic.gtIntrinsicId)
{
case CORINFO_INTRINSIC_Sqrt:
- if (op1->isMemoryOp() || op1->IsCnsNonZeroFltOrDbl())
+ if (IsContainableMemoryOp(op1) || op1->IsCnsNonZeroFltOrDbl())
{
MakeSrcContained(tree, op1);
}
@@ -2581,7 +2581,7 @@ void Lowering::TreeNodeInfoInitSIMD(GenTree* tree)
info->srcCount = 1;
}

- if (op1->isMemoryOp())
+ if (IsContainableMemoryOp(op1))
{
MakeSrcContained(tree, op1);

@@ -2805,7 +2805,7 @@ void Lowering::TreeNodeInfoInitCast(GenTree* tree)
// U8 -> R8 conversion requires that the operand be in a register.
if (castOpType != TYP_ULONG)
{
- if (castOp->isMemoryOp() || castOp->IsCnsNonZeroFltOrDbl())
+ if (IsContainableMemoryOp(castOp) || castOp->IsCnsNonZeroFltOrDbl())
{
MakeSrcContained(tree, castOp);
}
@@ -3095,7 +3095,7 @@ void Lowering::TreeNodeInfoInitCmp(GenTreePtr tree)
{
MakeSrcContained(tree, otherOp);
}
- else if (otherOp->isMemoryOp() && ((otherOp == op2) || IsSafeToContainMem(tree, otherOp)))
+ else if (IsContainableMemoryOp(otherOp) && ((otherOp == op2) || IsSafeToContainMem(tree, otherOp)))
{
MakeSrcContained(tree, otherOp);
}
@@ -3115,10 +3115,10 @@ void Lowering::TreeNodeInfoInitCmp(GenTreePtr tree)
if (CheckImmedAndMakeContained(tree, op2))
{
// If the types are the same, or if the constant is of the correct size,
- // we can treat the isMemoryOp as contained.
+ // we can treat the MemoryOp as contained.
if (op1Type == op2Type)
{
- if (op1->isMemoryOp())
+ if (IsContainableMemoryOp(op1))
{
MakeSrcContained(tree, op1);
}
@@ -3167,11 +3167,11 @@ void Lowering::TreeNodeInfoInitCmp(GenTreePtr tree)
// Note that TEST does not have a r,rm encoding like CMP has but we can still
// contain the second operand because the emitter maps both r,rm and rm,r to
// the same instruction code. This avoids the need to special case TEST here.
- if (op2->isMemoryOp())
+ if (IsContainableMemoryOp(op2))
{
MakeSrcContained(tree, op2);
}
- else if (op1->isMemoryOp() && IsSafeToContainMem(tree, op1))
+ else if (IsContainableMemoryOp(op1) && IsSafeToContainMem(tree, op1))
{
MakeSrcContained(tree, op1);
}
@@ -3260,7 +3260,7 @@ bool Lowering::TreeNodeInfoInitIfRMWMemOp(GenTreePtr storeInd)
if (GenTree::OperIsBinary(oper))
{
// On Xarch RMW operations require that the source memory-op be in a register.
- assert(!indirOpSource->isMemoryOp() || indirOpSource->gtLsraInfo.dstCount == 1);
+ assert(!IsContainableMemoryOp(indirOpSource) || indirOpSource->gtLsraInfo.dstCount == 1);
JITDUMP("Lower succesfully detected an assignment of the form: *addrMode BinOp= source\n");
info->srcCount = indirOpSource->gtLsraInfo.dstCount;
}
@@ -3367,11 +3367,11 @@ void Lowering::TreeNodeInfoInitMul(GenTreePtr tree)
{
assert(tree->OperGet() == GT_MUL);

- if (op2->isMemoryOp() || op2->IsCnsNonZeroFltOrDbl())
+ if (IsContainableMemoryOp(op2) || op2->IsCnsNonZeroFltOrDbl())
{
MakeSrcContained(tree, op2);
}
- else if (op1->IsCnsNonZeroFltOrDbl() || (op1->isMemoryOp() && IsSafeToContainMem(tree, op1)))
+ else if (op1->IsCnsNonZeroFltOrDbl() || (IsContainableMemoryOp(op1) && IsSafeToContainMem(tree, op1)))
{
// Since GT_MUL is commutative, we will try to re-order operands if it is safe to
// generate more efficient code sequence for the case of GT_MUL(op1=memOp, op2=non-memOp)
@@ -3460,7 +3460,7 @@ void Lowering::TreeNodeInfoInitMul(GenTreePtr tree)
}

MakeSrcContained(tree, imm); // The imm is always contained
- if (other->isMemoryOp())
+ if (IsContainableMemoryOp(other))
{
memOp = other; // memOp may be contained below
}
@@ -3471,7 +3471,7 @@
// This is because during codegen we use 'tree' type to derive EmitTypeSize.
// E.g. op1 type = byte, op2 type = byte but GT_MUL tree type is int.
//
- if (memOp == nullptr && op2->isMemoryOp())
+ if (memOp == nullptr && IsContainableMemoryOp(op2))
{
memOp = op2;
}
@@ -3610,7 +3610,7 @@ bool Lowering::ExcludeNonByteableRegisters(GenTree* tree)
GenTree* op1 = simdNode->gtGetOp1();
GenTree* op2 = simdNode->gtGetOp2();
var_types baseType = simdNode->gtSIMDBaseType;
- if (!op1->isMemoryOp() && op2->IsCnsIntOrI() && varTypeIsSmallInt(baseType))
+ if (!IsContainableMemoryOp(op1) && op2->IsCnsIntOrI() && varTypeIsSmallInt(baseType))
{
bool ZeroOrSignExtnReqd = true;
unsigned baseSize = genTypeSize(baseType);
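Nearly every call site in this file follows the same decision pattern: try to contain op2 first, and fall back to op1 only when the operation is commutative and reordering is safe. A distilled sketch of that pattern, using stand-in types rather than the real LSRA interfaces (Tree, ChooseContainedOperand, and the boolean fields are invented for illustration):

```cpp
#include <cstdio>

// Stand-in node type; the real code works on GenTree with LSRA annotations.
struct Tree
{
    bool containableMemOp = false; // IsContainableMemoryOp(node) in the real code
    bool commutative      = false;
    bool contained        = false;
    bool regOptional      = false;
};

// Stub: the real IsSafeToContainMem verifies that no interfering node
// executes between the operand and its consumer.
bool IsSafeToContainMem(Tree& /*parent*/, Tree& /*child*/) { return true; }

void ChooseContainedOperand(Tree& tree, Tree& op1, Tree& op2)
{
    if (op2.containableMemOp)
    {
        op2.contained = true; // preferred: fold op2, giving "op reg, [mem]"
    }
    else if (tree.commutative && op1.containableMemOp && IsSafeToContainMem(tree, op1))
    {
        op1.contained = true; // operands may be swapped, so fold op1 instead
    }
    else
    {
        op2.regOptional = true; // let the register allocator decide late
    }
}

int main()
{
    Tree add;
    add.commutative = true;
    Tree op1, op2;
    op1.containableMemOp = true; // e.g., an untracked local
    ChooseContainedOperand(add, op1, op2);
    std::printf("op1 contained: %d, op2 reg-optional: %d\n", op1.contained, op2.regOptional);
    return 0;
}
```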
12 changes: 7 additions & 5 deletions src/jit/simdcodegenxarch.cpp
@@ -2500,23 +2500,25 @@ void CodeGen::genSIMDIntrinsicGetItem(GenTreeSIMD* simdNode)
regNumber srcReg = op1->gtRegNum;

// Optimize the case where op1 is in memory and we are trying to access the ith element.
- if (op1->isMemoryOp())
+ if (!op1->isUsedFromReg())
{
assert(op1->isContained());

regNumber baseReg;
regNumber indexReg;
int offset = 0;

- if (op1->OperGet() == GT_LCL_FLD)
+ if (op1->OperIsLocal())
{
// There are three parts to the total offset here:
- // {offset of local} + {offset of SIMD Vector field} + {offset of element within SIMD vector}.
+ // {offset of local} + {offset of SIMD Vector field (lclFld only)} + {offset of element within SIMD vector}.
bool isEBPbased;
unsigned varNum = op1->gtLclVarCommon.gtLclNum;
offset += compiler->lvaFrameAddress(varNum, &isEBPbased);
- offset += op1->gtLclFld.gtLclOffs;

+ if (op1->OperGet() == GT_LCL_FLD)
+ {
+     offset += op1->gtLclFld.gtLclOffs;
+ }
baseReg = (isEBPbased) ? REG_EBP : REG_ESP;
}
else
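The three-part offset computation in the hunk above is easy to make concrete. A worked example with invented numbers (an untracked Vector<float> local at frame offset -64, accessed through a GT_LCL_FLD at offset 8, reading element 2):

```cpp
#include <cstdio>

int main()
{
    // All values are hypothetical, chosen only to illustrate the arithmetic.
    int frameOffset  = -64; // lvaFrameAddress(): the local's offset from RBP
    int lclFldOffset = 8;   // gtLclOffs, added only for GT_LCL_FLD nodes
    int index        = 2;   // the element requested by GetItem
    int elemSize     = 4;   // sizeof(float)

    int total = frameOffset + lclFldOffset + index * elemSize;

    // -64 + 8 + 2*4 = -48, so the element is read as dword ptr [rbp-48].
    std::printf("movss xmm0, dword ptr [rbp%+d]\n", total);
    return 0;
}
```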
