diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h index 26801adca120df..fee5d2356c226f 100644 --- a/src/coreclr/jit/compiler.h +++ b/src/coreclr/jit/compiler.h @@ -4692,40 +4692,25 @@ class Compiler // tree node). void fgValueNumber(); - // Computes new GcHeap VN via the assignment H[elemTypeEq][arrVN][inx][fldSeq] = rhsVN. - // Assumes that "elemTypeEq" is the (equivalence class rep) of the array element type. - // The 'indType' is the indirection type of the lhs of the assignment and will typically - // match the element type of the array or fldSeq. When this type doesn't match - // or if the fldSeq is 'NotAField' we invalidate the array contents H[elemTypeEq][arrVN] - // - ValueNum fgValueNumberArrIndexAssign(CORINFO_CLASS_HANDLE elemTypeEq, - ValueNum arrVN, - ValueNum inxVN, - FieldSeqNode* fldSeq, - ValueNum rhsVN, - var_types indType); - - // Requires that "tree" is a GT_IND marked as an array index, and that its address argument - // has been parsed to yield the other input arguments. If evaluation of the address - // can raise exceptions, those should be captured in the exception set "addrXvnp". - // Assumes that "elemTypeEq" is the (equivalence class rep) of the array element type. - // Marks "tree" with the VN for H[elemTypeEq][arrVN][inx][fldSeq] (for the liberal VN; a new unique - // VN for the conservative VN.) Also marks the tree's argument as the address of an array element. - // The type tree->TypeGet() will typically match the element type of the array or fldSeq. - // When this type doesn't match or if the fldSeq is 'NotAField' we return a new unique VN - // - ValueNum fgValueNumberArrIndexVal(GenTree* tree, - CORINFO_CLASS_HANDLE elemTypeEq, - ValueNum arrVN, - ValueNum inxVN, - ValueNumPair addrXvnp, - FieldSeqNode* fldSeq); - - // Requires "funcApp" to be a VNF_PtrToArrElem, and "addrXvnp" to represent the exception set thrown - // by evaluating the array index expression "tree". 
Returns the value number resulting from - // dereferencing the array in the current GcHeap state. If "tree" is non-null, it must be the - // "GT_IND" that does the dereference, and it is given the returned value number. - ValueNum fgValueNumberArrIndexVal(GenTree* tree, VNFuncApp* funcApp, ValueNumPair addrXvnp); + void fgValueNumberLocalStore(GenTree* storeNode, + GenTreeLclVarCommon* lclDefNode, + ssize_t offset, + unsigned storeSize, + ValueNumPair value, + bool normalize = true); + + void fgValueNumberArrayElemLoad(GenTree* loadTree, VNFuncApp* addrFunc); + + void fgValueNumberArrayElemStore(GenTree* storeNode, VNFuncApp* addrFunc, unsigned storeSize, ValueNum value); + + void fgValueNumberFieldLoad(GenTree* loadTree, GenTree* baseAddr, FieldSeqNode* fieldSeq, ssize_t offset); + + void fgValueNumberFieldStore(GenTree* storeNode, + GenTree* baseAddr, + FieldSeqNode* fieldSeq, + ssize_t offset, + unsigned storeSize, + ValueNum value); // Compute the value number for a byref-exposed load of the given type via the given pointerVN. 
ValueNum fgValueNumberByrefExposedLoad(var_types type, ValueNum pointerVN); diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp index d4d76c70734ae7..eedf2b60e4fdb7 100644 --- a/src/coreclr/jit/gentree.cpp +++ b/src/coreclr/jit/gentree.cpp @@ -15997,7 +15997,7 @@ bool GenTree::IsPartialLclFld(Compiler* comp) (comp->lvaTable[this->AsLclVarCommon()->GetLclNum()].lvExactSize != genTypeSize(gtType))); } -bool GenTree::DefinesLocal(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire) +bool GenTree::DefinesLocal(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire, ssize_t* pOffset) { GenTreeBlk* blkNode = nullptr; if (OperIs(GT_ASG)) @@ -16017,12 +16017,17 @@ bool GenTree::DefinesLocal(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, bo *pIsEntire = true; } } + if (pOffset != nullptr) + { + *pOffset = AsOp()->gtOp1->AsLclVarCommon()->GetLclOffs(); + } return true; } else if (AsOp()->gtOp1->OperGet() == GT_IND) { GenTree* indArg = AsOp()->gtOp1->AsOp()->gtOp1; - return indArg->DefinesLocalAddr(comp, genTypeSize(AsOp()->gtOp1->TypeGet()), pLclVarTree, pIsEntire); + return indArg->DefinesLocalAddr(comp, genTypeSize(AsOp()->gtOp1->TypeGet()), pLclVarTree, pIsEntire, + pOffset); } else if (AsOp()->gtOp1->OperIsBlk()) { @@ -16038,7 +16043,7 @@ bool GenTree::DefinesLocal(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, bo } unsigned size = comp->typGetObjLayout(AsCall()->gtRetClsHnd)->GetSize(); - return retBufArg->DefinesLocalAddr(comp, size, pLclVarTree, pIsEntire); + return retBufArg->DefinesLocalAddr(comp, size, pLclVarTree, pIsEntire, pOffset); } else if (OperIsBlk()) { @@ -16064,14 +16069,14 @@ bool GenTree::DefinesLocal(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, bo } } - return destAddr->DefinesLocalAddr(comp, width, pLclVarTree, pIsEntire); + return destAddr->DefinesLocalAddr(comp, width, pLclVarTree, pIsEntire, pOffset); } // Otherwise... 
return false; } -// Returns true if this GenTree defines a result which is based on the address of a local. -bool GenTree::DefinesLocalAddr(Compiler* comp, unsigned width, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire) +bool GenTree::DefinesLocalAddr( + Compiler* comp, unsigned width, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire, ssize_t* pOffset) { if (OperIs(GT_ADDR, GT_LCL_VAR_ADDR, GT_LCL_FLD_ADDR)) { @@ -16085,10 +16090,10 @@ bool GenTree::DefinesLocalAddr(Compiler* comp, unsigned width, GenTreeLclVarComm { GenTreeLclVarCommon* addrArgLcl = addrArg->AsLclVarCommon(); *pLclVarTree = addrArgLcl; + unsigned lclOffset = addrArgLcl->GetLclOffs(); + if (pIsEntire != nullptr) { - unsigned lclOffset = addrArgLcl->GetLclOffs(); - if (lclOffset != 0) { // We aren't updating the bytes at [0..lclOffset-1] so *pIsEntire should be set to false @@ -16107,29 +16112,45 @@ bool GenTree::DefinesLocalAddr(Compiler* comp, unsigned width, GenTreeLclVarComm *pIsEntire = (varWidth == width); } } + + if (pOffset != nullptr) + { + *pOffset += lclOffset; + } + return true; } else if (addrArg->OperGet() == GT_IND) { // A GT_ADDR of a GT_IND can both be optimized away, recurse using the child of the GT_IND - return addrArg->AsOp()->gtOp1->DefinesLocalAddr(comp, width, pLclVarTree, pIsEntire); + return addrArg->AsOp()->gtOp1->DefinesLocalAddr(comp, width, pLclVarTree, pIsEntire, pOffset); } } else if (OperGet() == GT_ADD) { if (AsOp()->gtOp1->IsCnsIntOrI()) { + if (pOffset != nullptr) + { + *pOffset += AsOp()->gtOp1->AsIntCon()->IconValue(); + } + // If we just adding a zero then we allow an IsEntire match against width // otherwise we change width to zero to disallow an IsEntire Match return AsOp()->gtOp2->DefinesLocalAddr(comp, AsOp()->gtOp1->IsIntegralConst(0) ? 
width : 0, pLclVarTree, - pIsEntire); + pIsEntire, pOffset); } else if (AsOp()->gtOp2->IsCnsIntOrI()) { + if (pOffset != nullptr) + { + *pOffset += AsOp()->gtOp2->AsIntCon()->IconValue(); + } + // If we just adding a zero then we allow an IsEntire match against width // otherwise we change width to zero to disallow an IsEntire Match return AsOp()->gtOp1->DefinesLocalAddr(comp, AsOp()->gtOp2->IsIntegralConst(0) ? width : 0, pLclVarTree, - pIsEntire); + pIsEntire, pOffset); } } // Post rationalization we could have GT_IND(GT_LEA(..)) trees. @@ -16145,20 +16166,20 @@ bool GenTree::DefinesLocalAddr(Compiler* comp, unsigned width, GenTreeLclVarComm GenTree* index = AsOp()->gtOp2; if (index != nullptr) { - assert(!index->DefinesLocalAddr(comp, width, pLclVarTree, pIsEntire)); + assert(!index->DefinesLocalAddr(comp, width, pLclVarTree, pIsEntire, pOffset)); } #endif // DEBUG // base - GenTree* base = AsOp()->gtOp1; + GenTree* base = AsAddrMode()->Base(); if (base != nullptr) { - // Lea could have an Indir as its base. - if (base->OperGet() == GT_IND) + if (pOffset != nullptr) { - base = base->AsOp()->gtOp1->gtEffectiveVal(/*commas only*/ true); + *pOffset += AsAddrMode()->Offset(); } - return base->DefinesLocalAddr(comp, width, pLclVarTree, pIsEntire); + + return base->DefinesLocalAddr(comp, width, pLclVarTree, pIsEntire, pOffset); } } // Otherwise... @@ -16604,6 +16625,12 @@ ssize_t GenTreeIndir::Offset() } } +unsigned GenTreeIndir::Size() const +{ + assert(isIndir() || OperIsBlk()); + return OperIsBlk() ? AsBlk()->Size() : genTypeSize(TypeGet()); +} + //------------------------------------------------------------------------ // GenTreeIntConCommon::ImmedValNeedsReloc: does this immediate value needs recording a relocation with the VM? 
// @@ -16731,6 +16758,7 @@ bool GenTreeIntConCommon::AddrNeedsReloc(Compiler* comp) // comp - the Compiler object // pBaseAddr - [out] parameter for "the base address" // pFldSeq - [out] parameter for the field sequence +// pOffset - [out] parameter for the offset of the component struct fields // // Return Value: // If "this" matches patterns denoted above, and the FldSeq found is "full", @@ -16742,7 +16770,7 @@ bool GenTreeIntConCommon::AddrNeedsReloc(Compiler* comp) // reference, for statics - the address to which the field offset with the // field sequence is added, see "impImportStaticFieldAccess" and "fgMorphField". // -bool GenTree::IsFieldAddr(Compiler* comp, GenTree** pBaseAddr, FieldSeqNode** pFldSeq) +bool GenTree::IsFieldAddr(Compiler* comp, GenTree** pBaseAddr, FieldSeqNode** pFldSeq, ssize_t* pOffset) { assert(TypeIs(TYP_I_IMPL, TYP_BYREF, TYP_REF)); @@ -16751,6 +16779,7 @@ bool GenTree::IsFieldAddr(Compiler* comp, GenTree** pBaseAddr, FieldSeqNode** pF GenTree* baseAddr = nullptr; FieldSeqNode* fldSeq = FieldSeqStore::NotAField(); + ssize_t offset = 0; if (OperIs(GT_ADD)) { @@ -16759,14 +16788,16 @@ bool GenTree::IsFieldAddr(Compiler* comp, GenTree** pBaseAddr, FieldSeqNode** pF if (AsOp()->gtOp1->IsCnsIntOrI() && AsOp()->gtOp1->IsIconHandle()) { assert(!AsOp()->gtOp2->IsCnsIntOrI() || !AsOp()->gtOp2->IsIconHandle()); - fldSeq = AsOp()->gtOp1->AsIntCon()->gtFieldSeq; baseAddr = AsOp()->gtOp2; + fldSeq = AsOp()->gtOp1->AsIntCon()->gtFieldSeq; + offset = AsOp()->gtOp1->AsIntCon()->IconValue(); } else if (AsOp()->gtOp2->IsCnsIntOrI()) { assert(!AsOp()->gtOp1->IsCnsIntOrI() || !AsOp()->gtOp1->IsIconHandle()); - fldSeq = AsOp()->gtOp2->AsIntCon()->gtFieldSeq; baseAddr = AsOp()->gtOp1; + fldSeq = AsOp()->gtOp2->AsIntCon()->gtFieldSeq; + offset = AsOp()->gtOp2->AsIntCon()->IconValue(); } else { @@ -16778,12 +16809,15 @@ bool GenTree::IsFieldAddr(Compiler* comp, GenTree** pBaseAddr, FieldSeqNode** pF else if (IsCnsIntOrI() && 
IsIconHandle(GTF_ICON_STATIC_HDL)) { assert(!comp->GetZeroOffsetFieldMap()->Lookup(this) && (AsIntCon()->gtFieldSeq != nullptr)); - fldSeq = AsIntCon()->gtFieldSeq; baseAddr = this; + fldSeq = AsIntCon()->gtFieldSeq; + offset = AsIntCon()->IconValue(); } else if (comp->GetZeroOffsetFieldMap()->Lookup(this, &fldSeq)) { + assert((fldSeq != FieldSeqStore::NotAField()) || (fldSeq->GetOffset() == 0)); baseAddr = this; + offset = 0; } else { @@ -16797,6 +16831,9 @@ bool GenTree::IsFieldAddr(Compiler* comp, GenTree** pBaseAddr, FieldSeqNode** pF return false; } + // Subtract from the offset such that the portion remaining is relative to the field itself. + offset -= fldSeq->GetOffset(); + // The above screens out obviously invalid cases, but we have more checks to perform. The // sequence returned from this method *must* start with either a class (NOT struct) field // or a static field. To avoid the expense of calling "getFieldClass" here, we will instead @@ -16811,6 +16848,7 @@ bool GenTree::IsFieldAddr(Compiler* comp, GenTree** pBaseAddr, FieldSeqNode** pF } *pFldSeq = fldSeq; + *pOffset = offset; return true; } @@ -16820,6 +16858,7 @@ bool GenTree::IsFieldAddr(Compiler* comp, GenTree** pBaseAddr, FieldSeqNode** pF *pBaseAddr = baseAddr; *pFldSeq = fldSeq; + *pOffset = offset; return true; } @@ -18088,16 +18127,18 @@ bool GenTree::IsArrayAddr(GenTreeArrAddr** pArrAddr) // Note that the value of the below field doesn't matter; it exists only to provide a distinguished address. // // static -FieldSeqNode FieldSeqStore::s_notAField(nullptr, nullptr, FieldSeqNode::FieldKind::Instance); +FieldSeqNode FieldSeqStore::s_notAField(nullptr, nullptr, 0, FieldSeqNode::FieldKind::Instance); // FieldSeqStore methods. 
FieldSeqStore::FieldSeqStore(CompAllocator alloc) : m_alloc(alloc), m_canonMap(new (alloc) FieldSeqNodeCanonMap(alloc)) { } -FieldSeqNode* FieldSeqStore::CreateSingleton(CORINFO_FIELD_HANDLE fieldHnd, FieldSeqNode::FieldKind fieldKind) +FieldSeqNode* FieldSeqStore::CreateSingleton(CORINFO_FIELD_HANDLE fieldHnd, + size_t offset, + FieldSeqNode::FieldKind fieldKind) { - FieldSeqNode fsn(fieldHnd, nullptr, fieldKind); + FieldSeqNode fsn(fieldHnd, nullptr, offset, fieldKind); FieldSeqNode* res = nullptr; if (m_canonMap->Lookup(fsn, &res)) { @@ -18136,7 +18177,7 @@ FieldSeqNode* FieldSeqStore::Append(FieldSeqNode* a, FieldSeqNode* b) assert(a != b); FieldSeqNode* tmp = Append(a->GetNext(), b); - FieldSeqNode fsn(a->GetFieldHandleValue(), tmp, a->GetKind()); + FieldSeqNode fsn(a->GetFieldHandleValue(), tmp, a->GetOffset(), a->GetKind()); FieldSeqNode* res = nullptr; if (m_canonMap->Lookup(fsn, &res)) { @@ -18152,7 +18193,8 @@ FieldSeqNode* FieldSeqStore::Append(FieldSeqNode* a, FieldSeqNode* b) } } -FieldSeqNode::FieldSeqNode(CORINFO_FIELD_HANDLE fieldHnd, FieldSeqNode* next, FieldKind fieldKind) : m_next(next) +FieldSeqNode::FieldSeqNode(CORINFO_FIELD_HANDLE fieldHnd, FieldSeqNode* next, size_t offset, FieldKind fieldKind) + : m_next(next), m_offset(offset) { uintptr_t handleValue = reinterpret_cast(fieldHnd); @@ -18162,6 +18204,7 @@ FieldSeqNode::FieldSeqNode(CORINFO_FIELD_HANDLE fieldHnd, FieldSeqNode* next, Fi if (fieldHnd != NO_FIELD_HANDLE) { assert(JitTls::GetCompiler()->eeIsFieldStatic(fieldHnd) == IsStaticField()); + // TODO-PhysicalVN: assert that "offset" is correct. 
} else { @@ -22816,6 +22859,11 @@ unsigned GenTreeHWIntrinsic::GetResultOpNumForFMA(GenTree* use, GenTree* op1, Ge } #endif // TARGET_XARCH && FEATURE_HW_INTRINSICS +unsigned GenTreeLclFld::GetSize() const +{ + return genTypeSize(TypeGet()); +} + #ifdef TARGET_ARM //------------------------------------------------------------------------ // IsOffsetMisaligned: check if the field needs a special handling on arm. diff --git a/src/coreclr/jit/gentree.h b/src/coreclr/jit/gentree.h index 7f11f4f26680ac..4f0b6baff2a2a8 100644 --- a/src/coreclr/jit/gentree.h +++ b/src/coreclr/jit/gentree.h @@ -243,9 +243,10 @@ class FieldSeqNode uintptr_t m_fieldHandleAndKind; FieldSeqNode* m_next; + size_t m_offset; public: - FieldSeqNode(CORINFO_FIELD_HANDLE fieldHnd, FieldSeqNode* next, FieldKind fieldKind); + FieldSeqNode(CORINFO_FIELD_HANDLE fieldHnd, FieldSeqNode* next, size_t offset, FieldKind fieldKind); FieldKind GetKind() const { @@ -268,6 +269,18 @@ class FieldSeqNode return m_next; } + //------------------------------------------------------------------------ + // GetOffset: Retrieve "the offset" for the field this node represents. + // + // For statics with a known (constant) address it will be the value of that address. + // For boxed statics, it will be TARGET_POINTER_SIZE (the method table pointer size). + // For other fields, it will be equal to the value "getFieldOffset" would return. + // + size_t GetOffset() const + { + return m_offset; + } + bool IsStaticField() const { return (GetKind() == FieldKind::SimpleStatic) || (GetKind() == FieldKind::SharedStatic); @@ -290,7 +303,8 @@ class FieldSeqNode // Make sure this provides methods that allow it to be used as a KeyFuncs type in JitHashTable. // Note that there is a one-to-one relationship between the field handle and the field kind, so - // we do not need to mask away the latter for comparison purposes. + // we do not need to mask away the latter for comparison purposes. 
Likewise, we do not need to + // include the offset. static int GetHashCode(FieldSeqNode fsn) { return static_cast(fsn.m_fieldHandleAndKind) ^ static_cast(reinterpret_cast(fsn.m_next)); @@ -317,6 +331,7 @@ class FieldSeqStore // Returns the (canonical in the store) singleton field sequence for the given handle. FieldSeqNode* CreateSingleton(CORINFO_FIELD_HANDLE fieldHnd, + size_t offset, FieldSeqNode::FieldKind fieldKind = FieldSeqNode::FieldKind::Instance); // This is a special distinguished FieldSeqNode indicating that a constant does *not* @@ -1900,7 +1915,10 @@ struct GenTree // tree for the local that is defined, and, if "pIsEntire" is non-null, sets "*pIsEntire" to // true or false, depending on whether the assignment writes to the entirety of the local // variable, or just a portion of it. - bool DefinesLocal(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire = nullptr); + bool DefinesLocal(Compiler* comp, + GenTreeLclVarCommon** pLclVarTree, + bool* pIsEntire = nullptr, + ssize_t* pOffset = nullptr); bool IsLocalAddrExpr(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, @@ -1930,7 +1948,7 @@ struct GenTree // Determine whether this tree is a basic block profile count update. bool IsBlockProfileUpdate(); - bool IsFieldAddr(Compiler* comp, GenTree** pBaseAddr, FieldSeqNode** pFldSeq); + bool IsFieldAddr(Compiler* comp, GenTree** pBaseAddr, FieldSeqNode** pFldSeq, ssize_t* pOffset); bool IsArrayAddr(GenTreeArrAddr** pArrAddr); @@ -1939,7 +1957,8 @@ struct GenTree // operation. Returns "true" if "this" is an address of (or within) // a local variable; sets "*pLclVarTree" to that local variable instance; and, if "pIsEntire" is non-null, // sets "*pIsEntire" to true if this assignment writes the full width of the local. 
- bool DefinesLocalAddr(Compiler* comp, unsigned width, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire); + bool DefinesLocalAddr( + Compiler* comp, unsigned width, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire, ssize_t* pOffset = nullptr); // These are only used for dumping. // The GetRegNum() is only valid in LIR, but the dumping methods are not easily @@ -3520,6 +3539,8 @@ struct GenTreeLclFld : public GenTreeLclVarCommon m_fieldSeq = fieldSeq; } + unsigned GetSize() const; + #ifdef TARGET_ARM bool IsOffsetMisaligned() const; #endif // TARGET_ARM @@ -6522,6 +6543,8 @@ struct GenTreeIndir : public GenTreeOp unsigned Scale(); ssize_t Offset(); + unsigned Size() const; + GenTreeIndir(genTreeOps oper, var_types type, GenTree* addr, GenTree* data) : GenTreeOp(oper, type, addr, data) { } diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index bf8e6114f19e71..10f34bc7db5e77 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -1410,11 +1410,14 @@ GenTree* Compiler::impAssignStructPtr(GenTree* destAddr, assert(OFFSETOF__CORINFO_TypedReference__dataPtr == 0); assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF); - fgAddFieldSeqForZeroOffset(destAddr, GetFieldSeqStore()->CreateSingleton(GetRefanyDataField())); + fgAddFieldSeqForZeroOffset(destAddr, + GetFieldSeqStore()->CreateSingleton(GetRefanyDataField(), + OFFSETOF__CORINFO_TypedReference__dataPtr)); GenTree* ptrSlot = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr); GenTreeIntCon* typeFieldOffset = gtNewIconNode(OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL); - typeFieldOffset->gtFieldSeq = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField()); + typeFieldOffset->gtFieldSeq = + GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField(), OFFSETOF__CORINFO_TypedReference__type); GenTree* typeSlot = gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset)); @@ -3635,11 +3638,11 @@ GenTree* 
Compiler::impCreateSpanIntrinsic(CORINFO_SIG_INFO* sig) CORINFO_FIELD_HANDLE lengthFieldHnd = info.compCompHnd->getFieldInClass(spanHnd, 1); GenTreeLclFld* pointerField = gtNewLclFldNode(spanTempNum, TYP_BYREF, 0); - pointerField->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(pointerFieldHnd)); + pointerField->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(pointerFieldHnd, 0)); GenTree* pointerFieldAsg = gtNewAssignNode(pointerField, pointerValue); GenTreeLclFld* lengthField = gtNewLclFldNode(spanTempNum, TYP_INT, TARGET_POINTER_SIZE); - lengthField->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(lengthFieldHnd)); + lengthField->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(lengthFieldHnd, TARGET_POINTER_SIZE)); GenTree* lengthFieldAsg = gtNewAssignNode(lengthField, lengthValue); // Now append a few statements the initialize the span @@ -8164,11 +8167,35 @@ GenTree* Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedT (pFieldInfo->fieldAccessor == CORINFO_FIELD_STATIC_READYTORUN_HELPER); FieldSeqNode::FieldKind fieldKind = isSharedStatic ? FieldSeqNode::FieldKind::SharedStatic : FieldSeqNode::FieldKind::SimpleStatic; - FieldSeqNode* innerFldSeq = !isBoxedStatic ? 
GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField, fieldKind) - : FieldSeqStore::NotAField(); - GenTree* op1; + FieldSeqNode* innerFldSeq = nullptr; + FieldSeqNode* outerFldSeq = nullptr; + if (isBoxedStatic) + { + innerFldSeq = FieldSeqStore::NotAField(); + outerFldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField, TARGET_POINTER_SIZE, fieldKind); + } + else + { + bool hasConstAddr = (pFieldInfo->fieldAccessor == CORINFO_FIELD_STATIC_ADDRESS) || + (pFieldInfo->fieldAccessor == CORINFO_FIELD_STATIC_RVA_ADDRESS); + size_t offset; + if (hasConstAddr) + { + offset = reinterpret_cast(info.compCompHnd->getFieldAddress(pResolvedToken->hField)); + assert(offset != 0); + } + else + { + offset = pFieldInfo->offset; + } + + innerFldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField, offset, fieldKind); + outerFldSeq = FieldSeqStore::NotAField(); + } + + GenTree* op1; switch (pFieldInfo->fieldAccessor) { case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER: @@ -8294,8 +8321,6 @@ GenTree* Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedT if (isBoxedStatic) { - FieldSeqNode* outerFldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField, fieldKind); - op1->ChangeType(TYP_REF); // points at boxed object op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, gtNewIconNode(TARGET_POINTER_SIZE, outerFldSeq)); @@ -8319,8 +8344,6 @@ GenTree* Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedT if (isBoxedStatic) { - FieldSeqNode* outerFldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField, fieldKind); - op1 = gtNewOperNode(GT_IND, TYP_REF, op1); op1->gtFlags |= (GTF_IND_INVARIANT | GTF_IND_NONFAULTING | GTF_IND_NONNULL); diff --git a/src/coreclr/jit/lclmorph.cpp b/src/coreclr/jit/lclmorph.cpp index 3860682ac36a6d..7bfb53142c8e22 100644 --- a/src/coreclr/jit/lclmorph.cpp +++ b/src/coreclr/jit/lclmorph.cpp @@ -308,7 +308,8 @@ class LocalAddressVisitor final : public GenTreeVisitor if 
(haveCorrectFieldForVN) { FieldSeqStore* fieldSeqStore = compiler->GetFieldSeqStore(); - m_fieldSeq = fieldSeqStore->Append(val.m_fieldSeq, fieldSeqStore->CreateSingleton(field->gtFldHnd)); + FieldSeqNode* fieldSeqNode = fieldSeqStore->CreateSingleton(field->gtFldHnd, field->gtFldOffset); + m_fieldSeq = fieldSeqStore->Append(val.m_fieldSeq, fieldSeqNode); } else { diff --git a/src/coreclr/jit/lclvars.cpp b/src/coreclr/jit/lclvars.cpp index 1e5926e1f6fd0a..24fac09a013b92 100644 --- a/src/coreclr/jit/lclvars.cpp +++ b/src/coreclr/jit/lclvars.cpp @@ -3309,7 +3309,7 @@ void Compiler::lvaUpdateClass(unsigned varNum, GenTree* tree, CORINFO_CLASS_HAND // // Returns: // Number of bytes needed on the frame for such a local. - +// unsigned Compiler::lvaLclSize(unsigned varNum) { assert(varNum < lvaCount); diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp index 65b50de28f1108..1c480ba0e34502 100644 --- a/src/coreclr/jit/morph.cpp +++ b/src/coreclr/jit/morph.cpp @@ -3465,9 +3465,11 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call) // (tmp.ptr=argx),(tmp.type=handle) GenTreeLclFld* destPtrSlot = gtNewLclFldNode(tmp, TYP_I_IMPL, OFFSETOF__CORINFO_TypedReference__dataPtr); GenTreeLclFld* destTypeSlot = gtNewLclFldNode(tmp, TYP_I_IMPL, OFFSETOF__CORINFO_TypedReference__type); - destPtrSlot->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(GetRefanyDataField())); + destPtrSlot->SetFieldSeq( + GetFieldSeqStore()->CreateSingleton(GetRefanyDataField(), OFFSETOF__CORINFO_TypedReference__dataPtr)); destPtrSlot->gtFlags |= GTF_VAR_DEF; - destTypeSlot->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField())); + destTypeSlot->SetFieldSeq( + GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField(), OFFSETOF__CORINFO_TypedReference__type)); destTypeSlot->gtFlags |= GTF_VAR_DEF; GenTree* asgPtrSlot = gtNewAssignNode(destPtrSlot, argx->AsOp()->gtOp1); @@ -5290,24 +5292,12 @@ GenTree* Compiler::fgMorphField(GenTree* tree, MorphAddrContext* mac) { 
assert(tree->gtOper == GT_FIELD); - CORINFO_FIELD_HANDLE symHnd = tree->AsField()->gtFldHnd; - unsigned fldOffset = tree->AsField()->gtFldOffset; - GenTree* objRef = tree->AsField()->GetFldObj(); - bool objIsLocal = false; - - FieldSeqNode* fieldSeq = FieldSeqStore::NotAField(); - if (!tree->AsField()->gtFldMayOverlap) - { - if (objRef != nullptr) - { - fieldSeq = GetFieldSeqStore()->CreateSingleton(symHnd, FieldSeqNode::FieldKind::Instance); - } - else - { - // Only simple statics get imported as GT_FIELDs. - fieldSeq = GetFieldSeqStore()->CreateSingleton(symHnd, FieldSeqNode::FieldKind::SimpleStatic); - } - } + CORINFO_FIELD_HANDLE symHnd = tree->AsField()->gtFldHnd; + unsigned fldOffset = tree->AsField()->gtFldOffset; + GenTree* objRef = tree->AsField()->GetFldObj(); + bool objIsLocal = false; + bool fldMayOverlap = tree->AsField()->gtFldMayOverlap; + FieldSeqNode* fieldSeq = FieldSeqStore::NotAField(); // Reset the flag because we may reuse the node. tree->AsField()->gtFldMayOverlap = false; @@ -5573,6 +5563,12 @@ GenTree* Compiler::fgMorphField(GenTree* tree, MorphAddrContext* mac) addr = gtNewOperNode(GT_ADD, addType, addr, offsetNode); } #endif + + if (!fldMayOverlap) + { + fieldSeq = GetFieldSeqStore()->CreateSingleton(symHnd, fldOffset, FieldSeqNode::FieldKind::Instance); + } + if (fldOffset != 0) { // Generate the "addr" node. @@ -5687,6 +5683,9 @@ GenTree* Compiler::fgMorphField(GenTree* tree, MorphAddrContext* mac) /* indirect to have tlsRef point at the base of the DLLs Thread Local Storage */ tlsRef = gtNewOperNode(GT_IND, TYP_I_IMPL, tlsRef); + assert(!fldMayOverlap); + fieldSeq = GetFieldSeqStore()->CreateSingleton(symHnd, fldOffset, FieldSeqNode::FieldKind::SimpleStatic); + if (fldOffset != 0) { GenTree* fldOffsetNode = new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, fldOffset, fieldSeq); @@ -5721,9 +5720,11 @@ GenTree* Compiler::fgMorphField(GenTree* tree, MorphAddrContext* mac) // For boxed statics, this direct address will be for the box. 
We have already added // the indirection for the field itself and attached the sequence, in importation. bool isBoxedStatic = gtIsStaticFieldPtrToBoxedStruct(tree->TypeGet(), symHnd); - if (isBoxedStatic) + if (!isBoxedStatic) { - fieldSeq = FieldSeqStore::NotAField(); + // Only simple statics get importred as GT_FIELDs. + fieldSeq = GetFieldSeqStore()->CreateSingleton(symHnd, reinterpret_cast(fldAddr), + FieldSeqNode::FieldKind::SimpleStatic); } // TODO-CQ: enable this optimization for 32 bit targets. @@ -9872,10 +9873,8 @@ GenTree* Compiler::fgMorphBlockOperand(GenTree* tree, var_types asgType, unsigne } if (needsIndirection) { - if (indirTree != nullptr) + if ((indirTree != nullptr) && (indirTree->OperIsBlk() || !isBlkReqd)) { - // If we have an indirection and a block is required, it should already be a block. - assert(indirTree->OperIsBlk() || !isBlkReqd); effectiveVal->gtType = asgType; } else @@ -9894,11 +9893,14 @@ GenTree* Compiler::fgMorphBlockOperand(GenTree* tree, var_types asgType, unsigne newTree = gtNewObjNode(clsHnd, addr); gtSetObjGcInfo(newTree->AsObj()); } + + gtUpdateNodeSideEffects(newTree); } else { newTree = gtNewIndir(asgType, addr); } + effectiveVal = newTree; } } @@ -9920,8 +9922,7 @@ GenTree* Compiler::fgMorphBlockOperand(GenTree* tree, var_types asgType, unsigne // false otherwise. // // Notes: -// This check is needed to avoid accessing LCL_VARs with incorrect -// CORINFO_FIELD_HANDLE that would confuse VN optimizations. +// TODO-PhysicalVN: with the physical VN scheme, this method is no longer needed. 
// bool Compiler::fgMorphCanUseLclFldForCopy(unsigned lclNum1, unsigned lclNum2) { diff --git a/src/coreclr/jit/morphblock.cpp b/src/coreclr/jit/morphblock.cpp index bfbc35a8fdcd07..3b740ab368635e 100644 --- a/src/coreclr/jit/morphblock.cpp +++ b/src/coreclr/jit/morphblock.cpp @@ -926,12 +926,13 @@ void MorphCopyBlockHelper::MorphStructCases() m_dst = m_comp->fgMorphBlockOperand(m_dst, asgType, m_blockSize, isBlkReqd); m_dst->gtFlags |= GTF_DONT_CSE; m_asg->gtOp1 = m_dst; - m_asg->gtFlags |= (m_dst->gtFlags & GTF_ALL_EFFECT); - // Eliminate the "OBJ or BLK" node on the src. - m_src = m_comp->fgMorphBlockOperand(m_src, asgType, m_blockSize, false /*!isBlkReqd*/); + m_src = m_comp->fgMorphBlockOperand(m_src, asgType, m_blockSize, isBlkReqd); m_asg->gtOp2 = m_src; + m_asg->SetAllEffectsFlags(m_dst, m_src); + m_asg->gtFlags |= GTF_ASG; + m_result = m_asg; m_transformationDecision = BlockTransformation::StructBlock; } @@ -1217,10 +1218,10 @@ GenTree* MorphCopyBlockHelper::CopyFieldByField() CORINFO_CLASS_HANDLE classHnd = srcVarDsc->GetStructHnd(); CORINFO_FIELD_HANDLE fieldHnd = m_comp->info.compCompHnd->getFieldInClass(classHnd, srcFieldVarDsc->lvFldOrdinal); - FieldSeqNode* curFieldSeq = m_comp->GetFieldSeqStore()->CreateSingleton(fieldHnd); - unsigned srcFieldOffset = m_comp->lvaGetDesc(srcFieldLclNum)->lvFldOffset; - var_types srcType = srcFieldVarDsc->TypeGet(); + unsigned srcFieldOffset = m_comp->lvaGetDesc(srcFieldLclNum)->lvFldOffset; + var_types srcType = srcFieldVarDsc->TypeGet(); + FieldSeqNode* curFieldSeq = m_comp->GetFieldSeqStore()->CreateSingleton(fieldHnd, srcFieldOffset); if (!m_dstUseLclFld) { @@ -1316,11 +1317,13 @@ GenTree* MorphCopyBlockHelper::CopyFieldByField() CORINFO_FIELD_HANDLE fieldHnd = m_comp->info.compCompHnd->getFieldInClass(classHnd, m_comp->lvaGetDesc(dstFieldLclNum)->lvFldOrdinal); - FieldSeqNode* curFieldSeq = m_comp->GetFieldSeqStore()->CreateSingleton(fieldHnd); + + unsigned fldOffset = 
m_comp->lvaGetDesc(dstFieldLclNum)->lvFldOffset; var_types destType = m_comp->lvaGetDesc(dstFieldLclNum)->lvType; + FieldSeqNode* curFieldSeq = m_comp->GetFieldSeqStore()->CreateSingleton(fieldHnd, fldOffset); bool done = false; - if (m_comp->lvaGetDesc(dstFieldLclNum)->lvFldOffset == 0) + if (fldOffset == 0) { // If this is a full-width use of the src via a different type, we need to create a // GT_LCL_FLD. @@ -1347,7 +1350,6 @@ GenTree* MorphCopyBlockHelper::CopyFieldByField() } if (!done) { - unsigned fldOffset = m_comp->lvaGetDesc(dstFieldLclNum)->lvFldOffset; if (!m_srcUseLclFld) { assert(srcAddrClone != nullptr); diff --git a/src/coreclr/jit/optimizer.cpp b/src/coreclr/jit/optimizer.cpp index 9be1521030b51d..491b326c8cfd2b 100644 --- a/src/coreclr/jit/optimizer.cpp +++ b/src/coreclr/jit/optimizer.cpp @@ -7854,6 +7854,7 @@ bool Compiler::optComputeLoopSideEffectsOfBlock(BasicBlock* blk) GenTreeArrAddr* arrAddr = nullptr; GenTree* baseAddr = nullptr; FieldSeqNode* fldSeq = nullptr; + ssize_t offset = 0; if (arg->IsArrayAddr(&arrAddr)) { @@ -7865,7 +7866,7 @@ bool Compiler::optComputeLoopSideEffectsOfBlock(BasicBlock* blk) // Conservatively assume byrefs may alias this array element memoryHavoc |= memoryKindSet(ByrefExposed); } - else if (arg->IsFieldAddr(this, &baseAddr, &fldSeq)) + else if (arg->IsFieldAddr(this, &baseAddr, &fldSeq, &offset)) { assert((fldSeq != nullptr) && (fldSeq != FieldSeqStore::NotAField())); diff --git a/src/coreclr/jit/valuenum.cpp b/src/coreclr/jit/valuenum.cpp index 5152acdaa9b128..ef71522ff40509 100644 --- a/src/coreclr/jit/valuenum.cpp +++ b/src/coreclr/jit/valuenum.cpp @@ -1778,6 +1778,15 @@ ValueNum ValueNumStore::VNForIntCon(INT32 cnsVal) } } +ValueNum ValueNumStore::VNForIntPtrCon(ssize_t cnsVal) +{ +#ifdef HOST_64BIT + return VNForLongCon(cnsVal); +#else // !HOST_64BIT + return VNForIntCon(cnsVal); +#endif // !HOST_64BIT +} + ValueNum ValueNumStore::VNForLongCon(INT64 cnsVal) { return VnForConst(cnsVal, GetLongCnsMap(), 
TYP_LONG); @@ -2270,11 +2279,10 @@ ValueNum ValueNumStore::VNForFunc( } //------------------------------------------------------------------------------ -// VNForMapStore : Evaluate VNF_MapStore with the given arguments. -// +// VNForMapStore: Create the VN for a precise store (to a precise map). // // Arguments: -// map - Map value number +// map - (VN of) the precise map // index - Index value number // value - New value for map[index] // @@ -2283,6 +2291,8 @@ ValueNum ValueNumStore::VNForFunc( // ValueNum ValueNumStore::VNForMapStore(ValueNum map, ValueNum index, ValueNum value) { + assert(MapIsPrecise(map)); + BasicBlock* const bb = m_pComp->compCurBB; BasicBlock::loopNumber const loopNum = bb->bbNatLoopNum; ValueNum const result = VNForFunc(TypeOfVN(map), VNF_MapStore, map, index, value, loopNum); @@ -2300,25 +2310,47 @@ ValueNum ValueNumStore::VNForMapStore(ValueNum map, ValueNum index, ValueNum val } //------------------------------------------------------------------------------ -// VNForMapSelect : Evaluate VNF_MapSelect with the given arguments. +// VNForMapPhysicalStore: Create the VN for a physical store (to a physical map). // +// Arguments: +// map - (VN of) the physical map +// offset - The store offset +// size - Size of "value" (in bytes) +// value - The value being stored +// +// Return Value: +// Value number for "map" with "map[offset:offset + size - 1]" set to "value". 
+// +ValueNum ValueNumStore::VNForMapPhysicalStore(ValueNum map, unsigned offset, unsigned size, ValueNum value) +{ + assert(MapIsPhysical(map)); + + ValueNum selector = EncodePhysicalSelector(offset, size); + ValueNum result = VNForFunc(TypeOfVN(map), VNF_MapPhysicalStore, map, selector, value); + + JITDUMP(" VNForMapPhysicalStore:%s returns ", VNMapTypeName(TypeOfVN(result))); + JITDUMPEXEC(m_pComp->vnPrint(result, 1)); + JITDUMP("\n"); + + return result; +} + +//------------------------------------------------------------------------------ +// VNForMapSelect: Select value from a precise map. // // Arguments: -// vnk - Value number kind -// type - Value type +// vnk - Value number kind (see "VNForMapSelectWork" notes) +// type - The type to select // map - Map value number // index - Index value number // // Return Value: -// Value number for the result of the evaluation. +// Value number corresponding to "map[index]". // -// Notes: -// This requires a "ValueNumKind" because it will attempt, given "select(phi(m1, ..., mk), ind)", to evaluate -// "select(m1, ind)", ..., "select(mk, ind)" to see if they agree. It needs to know which kind of value number -// (liberal/conservative) to read from the SSA def referenced in the phi argument. 
- ValueNum ValueNumStore::VNForMapSelect(ValueNumKind vnk, var_types type, ValueNum map, ValueNum index) { + assert(MapIsPrecise(map)); + int budget = m_mapSelectBudget; bool usedRecursiveVN = false; ValueNum result = VNForMapSelectWork(vnk, type, map, index, &budget, &usedRecursiveVN); @@ -2326,20 +2358,52 @@ ValueNum ValueNumStore::VNForMapSelect(ValueNumKind vnk, var_types type, ValueNu // The remaining budget should always be between [0..m_mapSelectBudget] assert((budget >= 0) && (budget <= m_mapSelectBudget)); -#ifdef DEBUG - if (m_pComp->verbose) - { - printf(" VNForMapSelect(" FMT_VN ", " FMT_VN "):%s returns ", map, index, VNMapTypeName(type)); - m_pComp->vnPrint(result, 1); - printf("\n"); - } -#endif + JITDUMP(" VNForMapSelect(" FMT_VN ", " FMT_VN "):%s returns ", map, index, VNMapTypeName(type)); + JITDUMPEXEC(m_pComp->vnPrint(result, 1)); + JITDUMP("\n"); + return result; } //------------------------------------------------------------------------------ -// VNForMapSelectWork : A method that does the work for VNForMapSelect and may call itself recursively. +// VNForMapPhysicalSelect: Select value from a physical map. // +// Arguments: +// vnk - Value number kind (see the notes for "VNForMapSelect") +// type - The type to select +// map - (VN of) the physical map +// offset - Offset to select from +// size - Size to select +// +// Return Value: +// Value number corresponding to "map[offset:offset + size - 1]". The value +// number returned can be of a type different from "type", as physical maps +// do not have canonical types, only sizes. 
+// +ValueNum ValueNumStore::VNForMapPhysicalSelect( + ValueNumKind vnk, var_types type, ValueNum map, unsigned offset, unsigned size) +{ + assert(MapIsPhysical(map)); + + ValueNum selector = EncodePhysicalSelector(offset, size); + int budget = m_mapSelectBudget; + bool usedRecursiveVN = false; + ValueNum result = VNForMapSelectWork(vnk, type, map, selector, &budget, &usedRecursiveVN); + + // The remaining budget should always be between [0..m_mapSelectBudget] + assert((budget >= 0) && (budget <= m_mapSelectBudget)); + + JITDUMP(" VNForMapPhysicalSelect(" FMT_VN ", ", map); + JITDUMPEXEC(vnDumpPhysicalSelector(selector)); + JITDUMP("):%s returns ", VNMapTypeName(type)); + JITDUMPEXEC(m_pComp->vnPrint(result, 1)); + JITDUMP("\n"); + + return result; +} + +//------------------------------------------------------------------------------ +// VNForMapSelectWork : A method that does the work for VNForMapSelect and may call itself recursively. // // Arguments: // vnk - Value number kind @@ -2357,12 +2421,13 @@ ValueNum ValueNumStore::VNForMapSelect(ValueNumKind vnk, var_types type, ValueNu // This requires a "ValueNumKind" because it will attempt, given "select(phi(m1, ..., mk), ind)", to evaluate // "select(m1, ind)", ..., "select(mk, ind)" to see if they agree. It needs to know which kind of value number // (liberal/conservative) to read from the SSA def referenced in the phi argument. - +// ValueNum ValueNumStore::VNForMapSelectWork( ValueNumKind vnk, var_types type, ValueNum map, ValueNum index, int* pBudget, bool* pUsedRecursiveVN) { TailCall: // This label allows us to directly implement a tail call by setting up the arguments, and doing a goto to here. + assert(map != NoVN && index != NoVN); assert(map == VNNormalValue(map)); // Arguments carry no exceptions. assert(index == VNNormalValue(index)); // Arguments carry no exceptions. 
@@ -2413,172 +2478,251 @@ ValueNum ValueNumStore::VNForMapSelectWork( VNFuncApp funcApp; if (GetVNFunc(map, &funcApp)) { - if (funcApp.m_func == VNF_MapStore) + switch (funcApp.m_func) { - // select(store(m, i, v), i) == v - if (funcApp.m_args[1] == index) + case VNF_MapStore: { + assert(MapIsPrecise(map)); + + // select(store(m, i, v), i) == v + if (funcApp.m_args[1] == index) + { #if FEATURE_VN_TRACE_APPLY_SELECTORS - JITDUMP(" AX1: select([" FMT_VN "]store(" FMT_VN ", " FMT_VN ", " FMT_VN "), " FMT_VN - ") ==> " FMT_VN ".\n", - funcApp.m_args[0], map, funcApp.m_args[1], funcApp.m_args[2], index, funcApp.m_args[2]); + JITDUMP(" AX1: select([" FMT_VN "]store(" FMT_VN ", " FMT_VN ", " FMT_VN "), " FMT_VN + ") ==> " FMT_VN ".\n", + funcApp.m_args[0], map, funcApp.m_args[1], funcApp.m_args[2], index, funcApp.m_args[2]); #endif - m_pComp->optRecordLoopMemoryDependence(m_pComp->compCurTree, m_pComp->compCurBB, funcApp.m_args[0]); - return funcApp.m_args[2]; + m_pComp->optRecordLoopMemoryDependence(m_pComp->compCurTree, m_pComp->compCurBB, + funcApp.m_args[0]); + return funcApp.m_args[2]; + } + // i # j ==> select(store(m, i, v), j) == select(m, j) + // Currently the only source of distinctions is when both indices are constants. + else if (IsVNConstant(index) && IsVNConstant(funcApp.m_args[1])) + { + assert(funcApp.m_args[1] != index); // we already checked this above. +#if FEATURE_VN_TRACE_APPLY_SELECTORS + JITDUMP(" AX2: " FMT_VN " != " FMT_VN " ==> select([" FMT_VN "]store(" FMT_VN ", " FMT_VN + ", " FMT_VN "), " FMT_VN ") ==> select(" FMT_VN ", " FMT_VN + ") remaining budget is %d.\n", + index, funcApp.m_args[1], map, funcApp.m_args[0], funcApp.m_args[1], funcApp.m_args[2], + index, funcApp.m_args[0], index, *pBudget); +#endif + // This is the equivalent of the recursive tail call: + // return VNForMapSelect(vnk, typ, funcApp.m_args[0], index); + // Make sure we capture any exceptions from the "i" and "v" of the store... 
+ map = funcApp.m_args[0]; + goto TailCall; + } } - // i # j ==> select(store(m, i, v), j) == select(m, j) - // Currently the only source of distinctions is when both indices are constants. - else if (IsVNConstant(index) && IsVNConstant(funcApp.m_args[1])) + break; + + case VNF_MapPhysicalStore: { - assert(funcApp.m_args[1] != index); // we already checked this above. + assert(MapIsPhysical(map)); + +#if FEATURE_VN_TRACE_APPLY_SELECTORS + JITDUMP(" select("); + JITDUMPEXEC(m_pComp->vnPrint(map, 1)); + JITDUMP(", "); + JITDUMPEXEC(vnDumpPhysicalSelector(index)); + JITDUMP(")"); +#endif + ValueNum storeSelector = funcApp.m_args[1]; + + if (index == storeSelector) + { +#if FEATURE_VN_TRACE_APPLY_SELECTORS + JITDUMP(" ==> " FMT_VN "\n", funcApp.m_args[2]); +#endif + return funcApp.m_args[2]; + } + + unsigned selectSize; + unsigned selectOffset = DecodePhysicalSelector(index, &selectSize); + + unsigned storeSize; + unsigned storeOffset = DecodePhysicalSelector(storeSelector, &storeSize); + + unsigned selectEndOffset = selectOffset + selectSize; // Exclusive. + unsigned storeEndOffset = storeOffset + storeSize; // Exclusive. + + if ((storeOffset <= selectOffset) && (selectEndOffset <= storeEndOffset)) + { +#if FEATURE_VN_TRACE_APPLY_SELECTORS + JITDUMP(" ==> enclosing, selecting inner, remaining budget is %d\n", *pBudget); +#endif + map = funcApp.m_args[2]; + index = EncodePhysicalSelector(selectOffset - storeOffset, selectSize); + goto TailCall; + } + + // If it was disjoint with the location being selected, continue the linear search. 
+ if ((storeEndOffset <= selectOffset) || (selectEndOffset <= storeOffset)) + { #if FEATURE_VN_TRACE_APPLY_SELECTORS - JITDUMP(" AX2: " FMT_VN " != " FMT_VN " ==> select([" FMT_VN "]store(" FMT_VN ", " FMT_VN - ", " FMT_VN "), " FMT_VN ") ==> select(" FMT_VN ", " FMT_VN ") remaining budget is %d.\n", - index, funcApp.m_args[1], map, funcApp.m_args[0], funcApp.m_args[1], funcApp.m_args[2], - index, funcApp.m_args[0], index, *pBudget); + JITDUMP(" ==> disjoint, remaining budget is %d\n", *pBudget); #endif - // This is the equivalent of the recursive tail call: - // return VNForMapSelect(vnk, typ, funcApp.m_args[0], index); - // Make sure we capture any exceptions from the "i" and "v" of the store... + map = funcApp.m_args[0]; + goto TailCall; + } + else + { +#if FEATURE_VN_TRACE_APPLY_SELECTORS + JITDUMP(" ==> aliasing!\n"); +#endif + } + } + break; + + case VNF_BitCast: + assert(MapIsPhysical(map)); +#if FEATURE_VN_TRACE_APPLY_SELECTORS + JITDUMP(" select(bitcast<%s>(" FMT_VN ")) ==> select(" FMT_VN ")\n", + varTypeName(TypeOfVN(funcApp.m_args[0])), funcApp.m_args[0], funcApp.m_args[0]); +#endif // FEATURE_VN_TRACE_APPLY_SELECTORS + map = funcApp.m_args[0]; goto TailCall; - } - } - else if (funcApp.m_func == VNF_PhiDef || funcApp.m_func == VNF_PhiMemoryDef) - { - unsigned lclNum = BAD_VAR_NUM; - bool isMemory = false; - VNFuncApp phiFuncApp; - bool defArgIsFunc = false; - if (funcApp.m_func == VNF_PhiDef) - { - lclNum = unsigned(funcApp.m_args[0]); - defArgIsFunc = GetVNFunc(funcApp.m_args[2], &phiFuncApp); - } - else - { - assert(funcApp.m_func == VNF_PhiMemoryDef); - isMemory = true; - defArgIsFunc = GetVNFunc(funcApp.m_args[1], &phiFuncApp); - } - if (defArgIsFunc && phiFuncApp.m_func == VNF_Phi) - { - // select(phi(m1, m2), x): if select(m1, x) == select(m2, x), return that, else new fresh. - // Get the first argument of the phi. - // We need to be careful about breaking infinite recursion. Record the outer select. 
- m_fixedPointMapSels.Push(VNDefFuncApp<2>(VNF_MapSelect, map, index)); + case VNF_ZeroObj: + assert(MapIsPhysical(map)); - assert(IsVNConstant(phiFuncApp.m_args[0])); - unsigned phiArgSsaNum = ConstantValue(phiFuncApp.m_args[0]); - ValueNum phiArgVN; - if (isMemory) + // TODO-CQ: support selection of TYP_STRUCT here. + if (type != TYP_STRUCT) { - phiArgVN = m_pComp->GetMemoryPerSsaData(phiArgSsaNum)->m_vnPair.Get(vnk); + return VNZeroForType(type); + } + break; + + case VNF_PhiDef: + case VNF_PhiMemoryDef: + { + unsigned lclNum = BAD_VAR_NUM; + bool isMemory = false; + VNFuncApp phiFuncApp; + bool defArgIsFunc = false; + if (funcApp.m_func == VNF_PhiDef) + { + lclNum = unsigned(funcApp.m_args[0]); + defArgIsFunc = GetVNFunc(funcApp.m_args[2], &phiFuncApp); } else { - phiArgVN = m_pComp->lvaTable[lclNum].GetPerSsaData(phiArgSsaNum)->m_vnPair.Get(vnk); + assert(funcApp.m_func == VNF_PhiMemoryDef); + isMemory = true; + defArgIsFunc = GetVNFunc(funcApp.m_args[1], &phiFuncApp); } - if (phiArgVN != ValueNumStore::NoVN) + if (defArgIsFunc && phiFuncApp.m_func == VNF_Phi) { - bool allSame = true; - ValueNum argRest = phiFuncApp.m_args[1]; - ValueNum sameSelResult = - VNForMapSelectWork(vnk, type, phiArgVN, index, pBudget, pUsedRecursiveVN); - - // It is possible that we just now exceeded our budget, if so we need to force an early exit - // and stop calling VNForMapSelectWork - if (*pBudget <= 0) + // select(phi(m1, m2), x): if select(m1, x) == select(m2, x), return that, else new fresh. + // Get the first argument of the phi. + + // We need to be careful about breaking infinite recursion. Record the outer select. + m_fixedPointMapSels.Push(VNDefFuncApp<2>(VNF_MapSelect, map, index)); + + assert(IsVNConstant(phiFuncApp.m_args[0])); + unsigned phiArgSsaNum = ConstantValue(phiFuncApp.m_args[0]); + ValueNum phiArgVN; + if (isMemory) { - // We don't have any budget remaining to verify that all phiArgs are the same - // so setup the default failure case now. 
- allSame = false; + phiArgVN = m_pComp->GetMemoryPerSsaData(phiArgSsaNum)->m_vnPair.Get(vnk); } - - while (allSame && argRest != ValueNumStore::NoVN) + else { - ValueNum cur = argRest; - VNFuncApp phiArgFuncApp; - if (GetVNFunc(argRest, &phiArgFuncApp) && phiArgFuncApp.m_func == VNF_Phi) - { - cur = phiArgFuncApp.m_args[0]; - argRest = phiArgFuncApp.m_args[1]; - } - else - { - argRest = ValueNumStore::NoVN; // Cause the loop to terminate. - } - assert(IsVNConstant(cur)); - phiArgSsaNum = ConstantValue(cur); - if (isMemory) - { - phiArgVN = m_pComp->GetMemoryPerSsaData(phiArgSsaNum)->m_vnPair.Get(vnk); - } - else - { - phiArgVN = m_pComp->lvaTable[lclNum].GetPerSsaData(phiArgSsaNum)->m_vnPair.Get(vnk); - } - if (phiArgVN == ValueNumStore::NoVN) + phiArgVN = m_pComp->lvaTable[lclNum].GetPerSsaData(phiArgSsaNum)->m_vnPair.Get(vnk); + } + if (phiArgVN != ValueNumStore::NoVN) + { + bool allSame = true; + ValueNum argRest = phiFuncApp.m_args[1]; + ValueNum sameSelResult = + VNForMapSelectWork(vnk, type, phiArgVN, index, pBudget, pUsedRecursiveVN); + + // It is possible that we just now exceeded our budget, if so we need to force an early exit + // and stop calling VNForMapSelectWork + if (*pBudget <= 0) { + // We don't have any budget remaining to verify that all phiArgs are the same + // so setup the default failure case now. 
allSame = false; } - else + + while (allSame && argRest != ValueNumStore::NoVN) { - bool usedRecursiveVN = false; - ValueNum curResult = - VNForMapSelectWork(vnk, type, phiArgVN, index, pBudget, &usedRecursiveVN); - *pUsedRecursiveVN |= usedRecursiveVN; - if (sameSelResult == ValueNumStore::RecursiveVN) + ValueNum cur = argRest; + VNFuncApp phiArgFuncApp; + if (GetVNFunc(argRest, &phiArgFuncApp) && phiArgFuncApp.m_func == VNF_Phi) { - sameSelResult = curResult; + cur = phiArgFuncApp.m_args[0]; + argRest = phiArgFuncApp.m_args[1]; } - if (curResult != ValueNumStore::RecursiveVN && curResult != sameSelResult) + else + { + argRest = ValueNumStore::NoVN; // Cause the loop to terminate. + } + assert(IsVNConstant(cur)); + phiArgSsaNum = ConstantValue(cur); + if (isMemory) + { + phiArgVN = m_pComp->GetMemoryPerSsaData(phiArgSsaNum)->m_vnPair.Get(vnk); + } + else + { + phiArgVN = m_pComp->lvaTable[lclNum].GetPerSsaData(phiArgSsaNum)->m_vnPair.Get(vnk); + } + if (phiArgVN == ValueNumStore::NoVN) { allSame = false; } + else + { + bool usedRecursiveVN = false; + ValueNum curResult = + VNForMapSelectWork(vnk, type, phiArgVN, index, pBudget, &usedRecursiveVN); + + *pUsedRecursiveVN |= usedRecursiveVN; + if (sameSelResult == ValueNumStore::RecursiveVN) + { + sameSelResult = curResult; + } + if (curResult != ValueNumStore::RecursiveVN && curResult != sameSelResult) + { + allSame = false; + } + } } - } - if (allSame && sameSelResult != ValueNumStore::RecursiveVN) - { - // Make sure we're popping what we pushed. - assert(FixedPointMapSelsTopHasValue(map, index)); - m_fixedPointMapSels.Pop(); - - // To avoid exponential searches, we make sure that this result is memo-ized. - // The result is always valid for memoization if we didn't rely on RecursiveVN to get it. - // If RecursiveVN was used, we are processing a loop and we can't memo-ize this intermediate - // result if, e.g., this block is in a multi-entry loop. 
- if (!*pUsedRecursiveVN) + if (allSame && sameSelResult != ValueNumStore::RecursiveVN) { - GetVNFunc2Map()->Set(fstruct, sameSelResult); - } + // Make sure we're popping what we pushed. + assert(FixedPointMapSelsTopHasValue(map, index)); + m_fixedPointMapSels.Pop(); + + // To avoid exponential searches, we make sure that this result is memo-ized. + // The result is always valid for memoization if we didn't rely on RecursiveVN to get + // it. + // If RecursiveVN was used, we are processing a loop and we can't memo-ize this + // intermediate + // result if, e.g., this block is in a multi-entry loop. + if (!*pUsedRecursiveVN) + { + GetVNFunc2Map()->Set(fstruct, sameSelResult); + } - return sameSelResult; + return sameSelResult; + } + // Otherwise, fall through to creating the select(phi(m1, m2), x) function application. } - // Otherwise, fall through to creating the select(phi(m1, m2), x) function application. + // Make sure we're popping what we pushed. + assert(FixedPointMapSelsTopHasValue(map, index)); + m_fixedPointMapSels.Pop(); } - // Make sure we're popping what we pushed. - assert(FixedPointMapSelsTopHasValue(map, index)); - m_fixedPointMapSels.Pop(); - } - } - else if (funcApp.m_func == VNF_ZeroObj) - { - // For structs, we need to extract the handle from the selector. - if (type == TYP_STRUCT) - { - // We only expect field selectors here. - assert(GetHandleFlags(index) == GTF_ICON_FIELD_HDL); - CORINFO_FIELD_HANDLE fieldHnd = CORINFO_FIELD_HANDLE(ConstantValue(index)); - CORINFO_CLASS_HANDLE structHnd = NO_CLASS_HANDLE; - m_pComp->eeGetFieldType(fieldHnd, &structHnd); - - return VNForZeroObj(structHnd); } + break; - return VNZeroForType(type); + default: + break; } } @@ -2600,37 +2744,77 @@ ValueNum ValueNumStore::VNForMapSelectWork( } } +//------------------------------------------------------------------------ +// EncodePhysicalSelector: Get the VN representing a physical selector. 
+// +// Arguments: +// offset - The offset to encode +// size - The size to encode +// +// Return Value: +// VN encoding the "{ offset, size }" tuple. +// +ValueNum ValueNumStore::EncodePhysicalSelector(unsigned offset, unsigned size) +{ + assert(size != 0); + + return VNForLongCon(static_cast<uint64_t>(offset) | (static_cast<uint64_t>(size) << 32)); +} + +//------------------------------------------------------------------------ +// DecodePhysicalSelector: Get the components of a physical selector from the +// VN representing one. +// +// Arguments: +// selector - The VN of the selector obtained via "EncodePhysicalSelector" +// pSize - [out] parameter for the size +// +// Return Value: +// The offset. +// +unsigned ValueNumStore::DecodePhysicalSelector(ValueNum selector, unsigned* pSize) +{ + uint64_t value = ConstantValue<uint64_t>(selector); + unsigned offset = static_cast<unsigned>(value); + unsigned size = static_cast<unsigned>(value >> 32); + + *pSize = size; + return offset; +} + //------------------------------------------------------------------------ // VNForFieldSelector: A specialized version (with logging) of VNForHandle // that is used for field handle selectors. // // Arguments: -// fieldHnd - handle of the field in question -// pFieldType - [out] parameter for the field's type -// pStructSize - optional [out] parameter for the size of the struct, -// populated if the field in question is of a struct type, -// otherwise set to zero +// fieldHnd - handle of the field in question +// pFieldType - [out] parameter for the field's type +// pSize - optional [out] parameter for the size of the field // // Return Value: // Value number corresponding to the given field handle.
// -ValueNum ValueNumStore::VNForFieldSelector(CORINFO_FIELD_HANDLE fieldHnd, var_types* pFieldType, size_t* pStructSize) +ValueNum ValueNumStore::VNForFieldSelector(CORINFO_FIELD_HANDLE fieldHnd, var_types* pFieldType, unsigned* pSize) { - CORINFO_CLASS_HANDLE structHnd = NO_CLASS_HANDLE; - ValueNum fldHndVN = VNForHandle(ssize_t(fieldHnd), GTF_ICON_FIELD_HDL); - var_types fieldType = m_pComp->eeGetFieldType(fieldHnd, &structHnd); - size_t structSize = 0; + CORINFO_CLASS_HANDLE structHnd = NO_CLASS_HANDLE; + ValueNum fldHndVN = VNForHandle(ssize_t(fieldHnd), GTF_ICON_FIELD_HDL); + var_types fieldType = m_pComp->eeGetFieldType(fieldHnd, &structHnd); + unsigned size = 0; if (fieldType == TYP_STRUCT) { - structSize = m_pComp->info.compCompHnd->getClassSize(structHnd); + size = m_pComp->info.compCompHnd->getClassSize(structHnd); // We have to normalize here since there is no CorInfoType for vectors... - if (m_pComp->structSizeMightRepresentSIMDType(structSize)) + if (m_pComp->structSizeMightRepresentSIMDType(size)) { fieldType = m_pComp->impNormStructType(structHnd); } } + else + { + size = genTypeSize(fieldType); + } #ifdef DEBUG if (m_pComp->verbose) @@ -2638,19 +2822,16 @@ ValueNum ValueNumStore::VNForFieldSelector(CORINFO_FIELD_HANDLE fieldHnd, var_ty const char* modName; const char* fldName = m_pComp->eeGetFieldName(fieldHnd, &modName); printf(" VNForHandle(%s) is " FMT_VN ", fieldType is %s", fldName, fldHndVN, varTypeName(fieldType)); - if (structSize != 0) + if (size != 0) { - printf(", size = %u", structSize); + printf(", size = %u", size); } printf("\n"); } #endif - if (pStructSize != nullptr) - { - *pStructSize = structSize; - } *pFieldType = fieldType; + *pSize = size; return fldHndVN; } @@ -3981,227 +4162,210 @@ ValueNum ValueNumStore::VNForExpr(BasicBlock* block, var_types typ) } //------------------------------------------------------------------------ -// VNApplySelectors: Find the value number corresponding to map[fieldSeq...]. 
-// -// Will construct the value number which is the result of selecting from "map" -// with all the fields (VNs of their handles): map[f0][f1]...[fN], indexing -// from outer to inner. The type of the returned number will be that of "f0", -// and all the "inner" maps in the indexing (map[f0][f1], ...) will also have -// the types of their selector fields. -// -// Essentially, this is a helper method that calls VNForMapSelect for the fields -// and provides some logging. +// VNForLoad: Get the VN for a load from a location (physical map). // // Arguments: -// vnk - the value number kind to use when recursing through phis -// map - the map to select from -// fieldSeq - the fields to use as selectors -// wbFinalStructSize - optional [out] parameter for the size of the last struct -// field in the sequence +// vnk - The kind of VN to select (see "VNForMapSelectWork" notes) +// locationValue - (VN of) the value location has +// locationSize - Size of the location +// loadType - Type being loaded +// offset - In-location offset being loaded from +// loadSize - Number of bytes being loaded // // Return Value: -// Value number corresponding to indexing "map" with all the fields. -// If the field sequence is empty ("nullptr"), will simply return "map". +// Value number representing "locationValue[offset:offset + loadSize - 1]", +// normalized to the same actual type as "loadType". Handles out-of-bounds +// loads by returning a "new, unique" VN. 
// -ValueNum ValueNumStore::VNApplySelectors(ValueNumKind vnk, - ValueNum map, - FieldSeqNode* fieldSeq, - size_t* wbFinalStructSize) +ValueNum ValueNumStore::VNForLoad(ValueNumKind vnk, + ValueNum locationValue, + unsigned locationSize, + var_types loadType, + ssize_t offset, + unsigned loadSize) { - for (FieldSeqNode* field = fieldSeq; field != nullptr; field = field->GetNext()) - { - assert(field != FieldSeqStore::NotAField()); + assert((loadSize > 0)); - JITDUMP(" VNApplySelectors:\n"); - var_types fieldType; - size_t structSize; - ValueNum fldHndVN = VNForFieldSelector(field->GetFieldHandle(), &fieldType, &structSize); + unsigned loadOffset = static_cast(offset); - if (wbFinalStructSize != nullptr) - { - *wbFinalStructSize = structSize; - } + if ((offset < 0) || (locationSize < (loadOffset + loadSize))) + { + JITDUMP(" *** VNForLoad: out-of-bounds load!\n"); + return VNForExpr(m_pComp->compCurBB, loadType); + } - map = VNForMapSelect(vnk, fieldType, map, fldHndVN); + ValueNum loadValue; + if (LoadStoreIsEntire(locationSize, loadOffset, loadSize)) + { + loadValue = locationValue; } + else + { + JITDUMP(" VNForLoad:\n"); + loadValue = VNForMapPhysicalSelect(vnk, loadType, locationValue, loadOffset, loadSize); + } + + // Unlike with stores, loads we always normalize (to have the property that the tree's type + // is the same as its VN's). + loadValue = VNForLoadStoreBitCast(loadValue, loadType, loadSize); + + assert(genActualType(TypeOfVN(loadValue)) == genActualType(loadType)); - return map; + return loadValue; } //------------------------------------------------------------------------ -// VNApplySelectorsTypeCheck: Constructs VN for an indirect read. +// VNPairForLoad: VNForLoad applied to a ValueNumPair. // -// Returns VN corresponding to the value of "IND(indType (ADDR(value))". 
-// May return a "new, unique" VN for un-analyzable reads (those of structs -// or out-of-bounds ones), or "value" cast to "indType", or "value" itself -// if there was no type mismatch. This function is intended to be used after -// VNApplySelectors has determined the value a particular location has, that -// is "value", and it is being read through an indirection of "indType". +ValueNumPair ValueNumStore::VNPairForLoad( + ValueNumPair locationValue, unsigned locationSize, var_types loadType, ssize_t offset, unsigned loadSize) +{ + ValueNum liberalVN = VNForLoad(VNK_Liberal, locationValue.GetLiberal(), locationSize, loadType, offset, loadSize); + ValueNum conservVN = + VNForLoad(VNK_Conservative, locationValue.GetConservative(), locationSize, loadType, offset, loadSize); + + return ValueNumPair(liberalVN, conservVN); +} + +//------------------------------------------------------------------------ +// VNForStore: Get the VN for a store to a location (physical map). // // Arguments: -// value - (VN of) value the location has -// indType - the type through which the location is being read -// valueStructSize - size of "value" if is of a struct type +// locationValue - (VN of) the value location had before the store +// locationSize - Size of the location +// offset - In-location offset being stored to +// storeSize - Number of bytes being stored +// value - (VN of) the value being stored // // Return Value: -// The constructed value number (see description). +// Value number for "locationValue" with "storeSize" bytes starting at +// "offset" set to "value". "NoVN" in case of an out-of-bounds store +// (the caller is expected to explicitly handle that). // // Notes: -// TODO-Bug: it is known that this function does not currently handle -// all cases correctly. E. g. it incorrectly returns CAST(float <- int) -// for cases like IND(float ADDR(int)). It is also too conservative for -// reads of SIMD types (treats them as un-analyzable). 
+// Does not handle "entire" (whole/identity) stores. // -ValueNum ValueNumStore::VNApplySelectorsTypeCheck(ValueNum value, var_types indType, size_t valueStructSize) +ValueNum ValueNumStore::VNForStore( + ValueNum locationValue, unsigned locationSize, ssize_t offset, unsigned storeSize, ValueNum value) { - var_types typeOfValue = TypeOfVN(value); - - // Check if the typeOfValue is matching/compatible + assert(storeSize > 0); - if (indType != typeOfValue) - { - // We are trying to read from an 'elem' of type 'elemType' using 'indType' read + // The caller is expected to handle identity stores, applying the appropriate normalization policy. + assert(!LoadStoreIsEntire(locationSize, offset, storeSize)); - size_t elemTypSize = (typeOfValue == TYP_STRUCT) ? valueStructSize : genTypeSize(typeOfValue); - size_t indTypeSize = genTypeSize(indType); + unsigned storeOffset = static_cast(offset); - if (indTypeSize > elemTypSize) - { - // Reading beyong the end of "value", return a new unique value number. - value = VNMakeNormalUnique(value); + if ((offset < 0) || (locationSize < (storeOffset + storeSize))) + { + JITDUMP(" *** VNForStore: out-of-bounds store -- location size is %u, offset is %zd, store size is %u\n", + locationSize, offset, storeSize); + // Some callers will need to invalidate parenting maps, so force explicit + // handling of this case instead of returning a "new, unique" VN. + return NoVN; + } - JITDUMP(" *** Mismatched types in VNApplySelectorsTypeCheck (reading beyond the end)\n"); - } - else if (varTypeIsStruct(indType)) - { - // We do not know how wide this indirection is - return a new unique value number. - value = VNMakeNormalUnique(value); + JITDUMP(" VNForStore:\n"); + return VNForMapPhysicalStore(locationValue, storeOffset, storeSize, value); +} - JITDUMP(" *** Mismatched types in VNApplySelectorsTypeCheck (indType is TYP_STRUCT)\n"); - } - else - { - // We are trying to read "value" of type "typeOfValue" using "indType" read. 
- // Insert a cast - this handles small types, i. e. "IND(byte ADDR(int))". - value = VNForCast(value, indType, typeOfValue); - } +//------------------------------------------------------------------------ +// VNPairForStore: VNForStore applied to a ValueNumPair. +// +ValueNumPair ValueNumStore::VNPairForStore( + ValueNumPair locationValue, unsigned locationSize, ssize_t offset, unsigned storeSize, ValueNumPair value) +{ + ValueNum liberalVN = VNForStore(locationValue.GetLiberal(), locationSize, offset, storeSize, value.GetLiberal()); + ValueNum conservVN; + if (locationValue.BothEqual() && value.BothEqual()) + { + conservVN = liberalVN; + } + else + { + conservVN = + VNForStore(locationValue.GetConservative(), locationSize, offset, storeSize, value.GetConservative()); } - return value; + return ValueNumPair(liberalVN, conservVN); } //------------------------------------------------------------------------ -// VNApplySelectorsAssignTypeCoerce: Compute the value number corresponding to `value` -// being written using an indirection of 'dstIndType'. +// VNForLoadStoreBitCast: Normalize a value number to the desired type. // // Arguments: -// value - value number for the value being stored; -// dstIndType - type of the indirection storing the value to the memory; +// value - (VN of) the value needed normalization +// indType - The type to normalize to +// indSize - The size of "indType" and "value" (relevant for structs) // // Return Value: -// The value number corresponding to memory after the assignment. +// Value number the logical "BitCast(value)". // -// Notes: It may insert a cast to dstIndType or return a unique value number for an incompatible indType. +// Notes: +// As far as the physical maps are concerned, all values with the same +// size "are equal". However, both IR and the rest of VN do distinguish +// between "4 bytes of TYP_INT" and "4 bytes of TYP_FLOAT". 
This method +// is called in cases where that gap needs to be bridged and the value +// "normalized" to the appropriate type. Notably, this normalization is +// only performed for primitives -- TYP_STRUCTs of different handles but +// same size are treated as equal (intentionally so -- this is good from +// CQ, TP and simplicity standpoints). // -ValueNum ValueNumStore::VNApplySelectorsAssignTypeCoerce(ValueNum value, var_types dstIndType) +ValueNum ValueNumStore::VNForLoadStoreBitCast(ValueNum value, var_types indType, unsigned indSize) { - var_types srcType = TypeOfVN(value); + var_types typeOfValue = TypeOfVN(value); - // Check if the srcType is matching/compatible. - if (dstIndType != srcType) + if (typeOfValue != indType) { - bool isConstant = IsVNConstant(value); - if (isConstant && (srcType == genActualType(dstIndType))) + if ((typeOfValue == TYP_STRUCT) || (indType == TYP_STRUCT)) { - // (i.e. We recorded a constant of TYP_INT for a TYP_BYTE field) + value = VNForBitCast(value, indType); } else { - // We are trying to write an 'elem' of type 'elemType' using 'indType' store + assert(genTypeSize(indType) == indSize); - if (varTypeIsStruct(dstIndType)) + // TODO-PhysicalVN: remove this pessimization. + if (varTypeIsSIMD(indType)) { - // return a new unique value number - value = VNMakeNormalUnique(value); - - JITDUMP(" *** Mismatched types in VNApplySelectorsAssignTypeCoerce (indType is TYP_STRUCT)\n"); + JITDUMP(" *** Mismatched types in VNForLoadStoreBitcast (indType is SIMD)\n"); + value = VNForExpr(m_pComp->compCurBB, indType); + } + else if (!varTypeIsFloating(typeOfValue) && !varTypeIsFloating(indType)) + { + // TODO-PhysicalVN: implement constant folding for bitcasts and remove this special case. 
+ value = VNForCast(value, indType, TypeOfVN(value)); } else { - // We are trying to write an 'elem' of type 'elemType' using 'indType' store - - // insert a cast of elem to 'indType' - value = VNForCast(value, dstIndType, srcType); - - JITDUMP(" Cast to %s inserted in VNApplySelectorsAssignTypeCoerce (elemTyp is %s)\n", - varTypeName(dstIndType), varTypeName(srcType)); + value = VNForBitCast(value, indType); } } + + JITDUMP(" VNForLoadStoreBitcast returns "); + JITDUMPEXEC(m_pComp->vnPrint(value, 1)); + JITDUMP("\n"); } + assert(genActualType(TypeOfVN(value)) == genActualType(indType)); + return value; } //------------------------------------------------------------------------ -// VNApplySelectorsAssign: Compute the value number corresponding to "map" but with -// the element at "fieldSeq" updated to be equal to "'value' written as 'indType'"; -// this is the new memory value for an assignment of "value" into the memory at -// location "fieldSeq" that occurs in the current block (so long as the selectors -// into that memory occupy disjoint locations, which is true for GcHeap). -// -// Arguments: -// vnk - identifies whether to recurse to Conservative or Liberal value numbers -// when recursing through phis -// map - value number for the field map before the assignment -// value - value number for the value being stored (to the given field) -// dstIndType - type of the indirection storing the value -// -// Return Value: -// The value number corresponding to memory ("map") after the assignment. +// VNPairForLoadStoreBitCast: VNForLoadStoreBitCast applied to a ValueNumPair. 
// -ValueNum ValueNumStore::VNApplySelectorsAssign( - ValueNumKind vnk, ValueNum map, FieldSeqNode* fieldSeq, ValueNum value, var_types dstIndType) +ValueNumPair ValueNumStore::VNPairForLoadStoreBitCast(ValueNumPair value, var_types indType, unsigned indSize) { - if (fieldSeq == nullptr) + ValueNum liberalVN = VNForLoadStoreBitCast(value.GetLiberal(), indType, indSize); + ValueNum conservVN; + if (value.BothEqual()) { - return VNApplySelectorsAssignTypeCoerce(value, dstIndType); + conservVN = liberalVN; } else { - assert(fieldSeq != FieldSeqStore::NotAField()); - - if (fieldSeq->GetNext() == nullptr) - { - JITDUMP(" VNApplySelectorsAssign:\n"); - } - - var_types fieldType; - ValueNum fldHndVN = VNForFieldSelector(fieldSeq->GetFieldHandle(), &fieldType); - - ValueNum valueAfter; - if (fieldSeq->GetNext() != nullptr) - { - ValueNum fseqMap = VNForMapSelect(vnk, fieldType, map, fldHndVN); - valueAfter = VNApplySelectorsAssign(vnk, fseqMap, fieldSeq->GetNext(), value, dstIndType); - } - else - { - valueAfter = VNApplySelectorsAssignTypeCoerce(value, dstIndType); - } - - return VNForMapStore(map, fldHndVN, valueAfter); + conservVN = VNForLoadStoreBitCast(value.GetConservative(), indType, indSize); } -} - -ValueNumPair ValueNumStore::VNPairApplySelectors(ValueNumPair map, FieldSeqNode* fieldSeq, var_types indType) -{ - size_t structSize = 0; - ValueNum liberalVN = VNApplySelectors(VNK_Liberal, map.GetLiberal(), fieldSeq, &structSize); - liberalVN = VNApplySelectorsTypeCheck(liberalVN, indType, structSize); - - structSize = 0; - ValueNum conservVN = VNApplySelectors(VNK_Conservative, map.GetConservative(), fieldSeq, &structSize); - conservVN = VNApplySelectorsTypeCheck(conservVN, indType, structSize); return ValueNumPair(liberalVN, conservVN); } @@ -4320,13 +4484,13 @@ ValueNum ValueNumStore::ExtendPtrVN(GenTree* opA, GenTree* opB) FieldSeqNode* fldSeq = opB->AsIntCon()->gtFieldSeq; if (fldSeq != nullptr) { - return ExtendPtrVN(opA, fldSeq); + return ExtendPtrVN(opA, 
fldSeq, opB->AsIntCon()->IconValue()); } } return NoVN; } -ValueNum ValueNumStore::ExtendPtrVN(GenTree* opA, FieldSeqNode* fldSeq) +ValueNum ValueNumStore::ExtendPtrVN(GenTree* opA, FieldSeqNode* fldSeq, ssize_t offset) { assert(fldSeq != nullptr); @@ -4356,12 +4520,18 @@ ValueNum ValueNumStore::ExtendPtrVN(GenTree* opA, FieldSeqNode* fldSeq) } else if (funcApp.m_func == VNF_PtrToStatic) { - res = VNForFunc(TYP_BYREF, VNF_PtrToStatic, funcApp.m_args[0], FieldSeqVNAppend(funcApp.m_args[1], fldSeq)); + res = VNForFunc(TYP_BYREF, VNF_PtrToStatic, funcApp.m_args[0], FieldSeqVNAppend(funcApp.m_args[1], fldSeq), + VNForIntPtrCon(ConstantValue(funcApp.m_args[2]) + offset)); } else if (funcApp.m_func == VNF_PtrToArrElem) { + VNFuncApp offsFunc; + GetVNFunc(funcApp.m_args[3], &offsFunc); + ValueNum fieldSeqVN = FieldSeqVNAppend(offsFunc.m_args[0], fldSeq); + ValueNum offsetVN = VNForIntPtrCon(ConstantValue(offsFunc.m_args[1]) + offset); + res = VNForFunc(TYP_BYREF, VNF_PtrToArrElem, funcApp.m_args[0], funcApp.m_args[1], funcApp.m_args[2], - FieldSeqVNAppend(funcApp.m_args[3], fldSeq)); + VNForFunc(TYP_I_IMPL, VNF_ArrElemOffset, fieldSeqVN, offsetVN)); } if (res != NoVN) { @@ -4371,28 +4541,182 @@ ValueNum ValueNumStore::ExtendPtrVN(GenTree* opA, FieldSeqNode* fldSeq) return res; } -ValueNum Compiler::fgValueNumberArrIndexAssign(CORINFO_CLASS_HANDLE elemTypeEq, - ValueNum arrVN, - ValueNum inxVN, - FieldSeqNode* fldSeq, - ValueNum rhsVN, - var_types indType) +//------------------------------------------------------------------------ +// fgValueNumberLocalStore: Assign VNs to the SSA definition corresponding +// to a local store. +// +// Or update the current heap state in case the local was address-exposed. 
+//
+// Arguments:
+//    storeNode  - The node performing the store
+//    lclDefNode - The local node representing the SSA definition
+//    offset     - The offset, relative to the local, of the target location
+//    storeSize  - The number of bytes being stored
+//    value      - (VN of) the value being stored
+//    normalize  - Whether "value" should be normalized to the local's type
+//                 (in case the store overwrites the entire variable) before
+//                 being written to the SSA descriptor
+//
+void Compiler::fgValueNumberLocalStore(GenTree* storeNode,
+                                       GenTreeLclVarCommon* lclDefNode,
+                                       ssize_t offset,
+                                       unsigned storeSize,
+                                       ValueNumPair value,
+                                       bool normalize)
 {
-    bool invalidateArray = false;
-    ValueNum elemTypeEqVN = vnStore->VNForHandle(ssize_t(elemTypeEq), GTF_ICON_CLASS_HDL);
-    var_types arrElemType = DecodeElemType(elemTypeEq);
-    ValueNum hAtArrType = vnStore->VNForMapSelect(VNK_Liberal, TYP_MEM, fgCurMemoryVN[GcHeap], elemTypeEqVN);
-    ValueNum hAtArrTypeAtArr = vnStore->VNForMapSelect(VNK_Liberal, TYP_MEM, hAtArrType, arrVN);
-    ValueNum hAtArrTypeAtArrAtInx = vnStore->VNForMapSelect(VNK_Liberal, arrElemType, hAtArrTypeAtArr, inxVN);
+    // Should not have been recorded as updating the GC heap.
+    assert(!GetMemorySsaMap(GcHeap)->Lookup(storeNode));
+
+    LclVarDsc* varDsc = lvaGetDesc(lclDefNode);
+    unsigned lclDefSsaNum = GetSsaNumForLocalVarDef(lclDefNode);
+
+    if (lclDefSsaNum != SsaConfig::RESERVED_SSA_NUM)
+    {
+        unsigned lclSize = lvaLclExactSize(lclDefNode->GetLclNum());
+
+        ValueNumPair newLclValue;
+        if (vnStore->LoadStoreIsEntire(lclSize, offset, storeSize))
+        {
+            newLclValue = value;
+        }
+        else
+        {
+            assert((lclDefNode->gtFlags & GTF_VAR_USEASG) != 0);
+            // The "lclDefNode" node will be labeled with the SSA number of its "use" identity
+            // (we looked in a side table above for its "def" identity). Look up that value.
+ ValueNumPair oldLclValue = varDsc->GetPerSsaData(lclDefNode->GetSsaNum())->m_vnPair; + newLclValue = vnStore->VNPairForStore(oldLclValue, lclSize, offset, storeSize, value); + } + + // Any out-of-bounds stores should have made the local address-exposed. + assert(newLclValue.BothDefined()); + + if (normalize) + { + // We normalize types stored in local locations because things outside VN itself look at them. + newLclValue = vnStore->VNPairForLoadStoreBitCast(newLclValue, varDsc->TypeGet(), lclSize); + assert((genActualType(vnStore->TypeOfVN(newLclValue.GetLiberal())) == genActualType(varDsc))); + } + + varDsc->GetPerSsaData(lclDefSsaNum)->m_vnPair = newLclValue; - ValueNum newValAtInx = ValueNumStore::NoVN; + JITDUMP("Tree [%06u] assigned VN to local var V%02u/%d: ", dspTreeID(storeNode), lclDefNode->GetLclNum(), + lclDefSsaNum); + JITDUMPEXEC(vnpPrint(newLclValue, 1)); + JITDUMP("\n"); + } + else if (varDsc->IsAddressExposed()) + { + ValueNum heapVN = vnStore->VNForExpr(compCurBB, TYP_HEAP); + recordAddressExposedLocalStore(storeNode, heapVN DEBUGARG("local assign")); + } + else + { + JITDUMP("Tree [%06u] assigns to non-address-taken local var V%02u; excluded from SSA, so value not tracked\n", + dspTreeID(storeNode), lclDefNode->GetLclNum()); + } +} + +//------------------------------------------------------------------------ +// fgValueNumberArrayElemLoad: Value number a load from an array element. +// +// Arguments: +// loadTree - The indirection tree performing the load +// addrFunc - The "VNF_PtrToArrElem" function representing the address +// +// Notes: +// Only assigns normal VNs to "loadTree". 
+// +void Compiler::fgValueNumberArrayElemLoad(GenTree* loadTree, VNFuncApp* addrFunc) +{ + assert(loadTree->OperIsIndir() && (addrFunc->m_func == VNF_PtrToArrElem)); + + CORINFO_CLASS_HANDLE elemTypeEq = CORINFO_CLASS_HANDLE(vnStore->ConstantValue(addrFunc->m_args[0])); + ValueNum arrVN = addrFunc->m_args[1]; + ValueNum inxVN = addrFunc->m_args[2]; + VNFuncApp offsFunc; + vnStore->GetVNFunc(addrFunc->m_args[3], &offsFunc); + FieldSeqNode* fieldSeq = vnStore->FieldSeqVNToFieldSeq(offsFunc.m_args[0]); + ssize_t offset = vnStore->ConstantValue(offsFunc.m_args[1]); + + // TODO-PhysicalVN: with the physical VN scheme, we no longer need the field sequence checks. + if (fieldSeq == FieldSeqStore::NotAField()) + { + loadTree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, loadTree->TypeGet())); + return; + } + + // The VN inputs are required to be non-exceptional values. + assert(arrVN == vnStore->VNNormalValue(arrVN)); + assert(inxVN == vnStore->VNNormalValue(inxVN)); + + // Heap[elemTypeEq][arrVN][inx][offset + size]. + var_types elemType = DecodeElemType(elemTypeEq); + ValueNum elemTypeEqVN = vnStore->VNForHandle(ssize_t(elemTypeEq), GTF_ICON_CLASS_HDL); + JITDUMP(" Array element load: elemTypeEq is " FMT_VN " for %s[]\n", elemTypeEqVN, + (elemType == TYP_STRUCT) ? eeGetClassName(elemTypeEq) : varTypeName(elemType)); + + ValueNum hAtArrType = vnStore->VNForMapSelect(VNK_Liberal, TYP_MEM, fgCurMemoryVN[GcHeap], elemTypeEqVN); + JITDUMP(" GcHeap[elemTypeEq: " FMT_VN "] is " FMT_VN "\n", elemTypeEqVN, hAtArrType); + + ValueNum hAtArrTypeAtArr = vnStore->VNForMapSelect(VNK_Liberal, TYP_MEM, hAtArrType, arrVN); + JITDUMP(" GcHeap[elemTypeEq][array: " FMT_VN "] is " FMT_VN "\n", arrVN, hAtArrTypeAtArr); + + ValueNum wholeElem = vnStore->VNForMapSelect(VNK_Liberal, elemType, hAtArrTypeAtArr, inxVN); + JITDUMP(" GcHeap[elemTypeEq][array][index: " FMT_VN "] is " FMT_VN "\n", inxVN, wholeElem); + + unsigned elemSize = (elemType == TYP_STRUCT) ? 
info.compCompHnd->getClassSize(elemTypeEq) : genTypeSize(elemType); + var_types loadType = loadTree->TypeGet(); + unsigned loadSize = loadTree->AsIndir()->Size(); + ValueNum loadValueVN = vnStore->VNForLoad(VNK_Liberal, wholeElem, elemSize, loadType, offset, loadSize); + + loadTree->gtVNPair.SetLiberal(loadValueVN); + loadTree->gtVNPair.SetConservative(vnStore->VNForExpr(compCurBB, loadType)); +} + +//------------------------------------------------------------------------ +// fgValueNumberArrayElemStore: Update the current heap state after a store +// to an array element. +// +// Arguments: +// storeNode - The ASG node performing the store +// addrFunc - The "VNF_PtrToArrElem" function representing the address +// storeSize - The number of bytes being stored +// value - (VN of) the value being stored +// +void Compiler::fgValueNumberArrayElemStore(GenTree* storeNode, VNFuncApp* addrFunc, unsigned storeSize, ValueNum value) +{ + assert(addrFunc->m_func == VNF_PtrToArrElem); + + CORINFO_CLASS_HANDLE elemTypeEq = CORINFO_CLASS_HANDLE(vnStore->ConstantValue(addrFunc->m_args[0])); + ValueNum arrVN = addrFunc->m_args[1]; + ValueNum inxVN = addrFunc->m_args[2]; + VNFuncApp offsFunc; + vnStore->GetVNFunc(addrFunc->m_args[3], &offsFunc); + FieldSeqNode* fldSeq = vnStore->FieldSeqVNToFieldSeq(offsFunc.m_args[0]); + ssize_t offset = vnStore->ConstantValue(offsFunc.m_args[1]); + + bool invalidateArray = false; + var_types elemType = DecodeElemType(elemTypeEq); + ValueNum elemTypeEqVN = vnStore->VNForHandle(ssize_t(elemTypeEq), GTF_ICON_CLASS_HDL); + JITDUMP(" Array element store: elemTypeEq is " FMT_VN " for %s[]\n", elemTypeEqVN, + (elemType == TYP_STRUCT) ? 
eeGetClassName(elemTypeEq) : varTypeName(elemType)); + + ValueNum hAtArrType = vnStore->VNForMapSelect(VNK_Liberal, TYP_MEM, fgCurMemoryVN[GcHeap], elemTypeEqVN); + JITDUMP(" GcHeap[elemTypeEq: " FMT_VN "] is " FMT_VN "\n", elemTypeEqVN, hAtArrType); + + ValueNum hAtArrTypeAtArr = vnStore->VNForMapSelect(VNK_Liberal, TYP_MEM, hAtArrType, arrVN); + JITDUMP(" GcHeap[elemTypeEq][array: " FMT_VN "] is " FMT_VN "\n", arrVN, hAtArrTypeAtArr); + + ValueNum newWholeElem = ValueNumStore::NoVN; ValueNum newValAtArr = ValueNumStore::NoVN; ValueNum newValAtArrType = ValueNumStore::NoVN; + // TODO-PhysicalVN: with the physical VN scheme, we no longer need the field sequence checks. if (fldSeq == FieldSeqStore::NotAField()) { // This doesn't represent a proper array access - JITDUMP(" *** NotAField sequence encountered in fgValueNumberArrIndexAssign\n"); + JITDUMP(" *** NotAField sequence encountered in fgValueNumberArrayElemStore\n"); // Store a new unique value for newValAtArrType newValAtArrType = vnStore->VNForExpr(compCurBB, TYP_MEM); @@ -4400,21 +4724,30 @@ ValueNum Compiler::fgValueNumberArrIndexAssign(CORINFO_CLASS_HANDLE elemTypeEq, } else { - // Note that this does the right thing if "fldSeq" is null -- returns last "rhs" argument. + unsigned elemSize = + (elemType == TYP_STRUCT) ? info.compCompHnd->getClassSize(elemTypeEq) : genTypeSize(elemType); + // This is the value that should be stored at "arr[inx]". - newValAtInx = vnStore->VNApplySelectorsAssign(VNK_Liberal, hAtArrTypeAtArrAtInx, fldSeq, rhsVN, indType); + if (vnStore->LoadStoreIsEntire(elemSize, offset, storeSize)) + { + // For memory locations (as opposed to locals), we do not normalize types. 
+ newWholeElem = value; + } + else + { + ValueNum oldWholeElem = vnStore->VNForMapSelect(VNK_Liberal, elemType, hAtArrTypeAtArr, inxVN); + JITDUMP(" GcHeap[elemTypeEq][array][index: " FMT_VN "] is " FMT_VN "\n", inxVN, oldWholeElem); + + newWholeElem = vnStore->VNForStore(oldWholeElem, elemSize, offset, storeSize, value); + } - // TODO-VNTypes: the validation below is a workaround for logic in ApplySelectorsAssignTypeCoerce - // not handling some cases correctly. Remove once ApplySelectorsAssignTypeCoerce has been fixed. - var_types arrElemFldType = - (fldSeq != nullptr) ? eeGetFieldType(fldSeq->GetTail()->GetFieldHandle()) : arrElemType; + // TODO-PhysicalVN: with the physical VN scheme, we no longer need the type check below. + var_types arrElemFldType = (fldSeq != nullptr) ? eeGetFieldType(fldSeq->GetTail()->GetFieldHandle()) : elemType; - if (indType != arrElemFldType) + if ((storeNode->gtGetOp1()->TypeGet() != arrElemFldType) || (newWholeElem == ValueNumStore::NoVN)) { // Mismatched types: Store between different types (indType into array of arrElemFldType) - // - - JITDUMP(" *** Mismatched types in fgValueNumberArrIndexAssign\n"); + JITDUMP(" *** Mismatched types in fgValueNumberArrayElemStore\n"); // Store a new unique value for newValAtArrType newValAtArrType = vnStore->VNForExpr(compCurBB, TYP_MEM); @@ -4424,149 +4757,155 @@ ValueNum Compiler::fgValueNumberArrIndexAssign(CORINFO_CLASS_HANDLE elemTypeEq, if (!invalidateArray) { - newValAtArr = vnStore->VNForMapStore(hAtArrTypeAtArr, inxVN, newValAtInx); + JITDUMP(" GcHeap[elemTypeEq][array][index: " FMT_VN "] = " FMT_VN ":\n", inxVN, newWholeElem); + newValAtArr = vnStore->VNForMapStore(hAtArrTypeAtArr, inxVN, newWholeElem); + + JITDUMP(" GcHeap[elemTypeEq][array: " FMT_VN "] = " FMT_VN ":\n", arrVN, newValAtArr); newValAtArrType = vnStore->VNForMapStore(hAtArrType, arrVN, newValAtArr); } -#ifdef DEBUG - if (verbose) - { - printf(" hAtArrType " FMT_VN " is MapSelect(curGcHeap(" FMT_VN "), ", hAtArrType, 
fgCurMemoryVN[GcHeap]); + JITDUMP(" GcHeap[elemTypeEq: " FMT_VN "] = " FMT_VN ":\n", elemTypeEqVN, newValAtArrType); + ValueNum newHeapVN = vnStore->VNForMapStore(fgCurMemoryVN[GcHeap], elemTypeEqVN, newValAtArrType); - if (arrElemType == TYP_STRUCT) - { - printf("%s[]).\n", eeGetClassName(elemTypeEq)); - } - else - { - printf("%s[]).\n", varTypeName(arrElemType)); - } - printf(" hAtArrTypeAtArr " FMT_VN " is MapSelect(hAtArrType(" FMT_VN "), arr=" FMT_VN ")\n", hAtArrTypeAtArr, - hAtArrType, arrVN); - printf(" hAtArrTypeAtArrAtInx " FMT_VN " is MapSelect(hAtArrTypeAtArr(" FMT_VN "), inx=" FMT_VN "):%s\n", - hAtArrTypeAtArrAtInx, hAtArrTypeAtArr, inxVN, ValueNumStore::VNMapTypeName(arrElemType)); + recordGcHeapStore(storeNode, newHeapVN DEBUGARG("array element store")); +} - if (!invalidateArray) - { - printf(" newValAtInd " FMT_VN " is ", newValAtInx); - vnStore->vnDump(this, newValAtInx); - printf("\n"); +//------------------------------------------------------------------------ +// fgValueNumberFieldLoad: Value number a class/static field load. +// +// Arguments: +// loadTree - The indirection tree performing the load +// baseAddr - The "base address" of the field (see "GenTree::IsFieldAddr") +// fieldSeq - The field sequence representing the address +// offset - The offset, relative to the field, being loaded from +// +// Notes: +// Only assigns normal VNs to "loadTree". 
+// +void Compiler::fgValueNumberFieldLoad(GenTree* loadTree, GenTree* baseAddr, FieldSeqNode* fieldSeq, ssize_t offset) +{ + assert(fieldSeq != nullptr); - printf(" newValAtArr " FMT_VN " is ", newValAtArr); - vnStore->vnDump(this, newValAtArr); - printf("\n"); - } + if (fieldSeq == FieldSeqStore::NotAField()) + { + loadTree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, loadTree->TypeGet())); + return; + } - printf(" newValAtArrType " FMT_VN " is ", newValAtArrType); - vnStore->vnDump(this, newValAtArrType); - printf("\n"); + // Two cases: + // + // 1) Instance field / "complex" static: heap[field][baseAddr][offset + load size]. + // 2) "Simple" static: heap[field][offset + load size]. + // + var_types fieldType; + unsigned fieldSize; + ValueNum fieldSelectorVN = vnStore->VNForFieldSelector(fieldSeq->GetFieldHandle(), &fieldType, &fieldSize); + + ValueNum fieldMapVN = ValueNumStore::NoVN; + ValueNum fieldValueSelectorVN = ValueNumStore::NoVN; + if (baseAddr != nullptr) + { + fieldMapVN = vnStore->VNForMapSelect(VNK_Liberal, TYP_MEM, fgCurMemoryVN[GcHeap], fieldSelectorVN); + fieldValueSelectorVN = vnStore->VNLiberalNormalValue(baseAddr->gtVNPair); + } + else + { + fieldMapVN = fgCurMemoryVN[GcHeap]; + fieldValueSelectorVN = fieldSelectorVN; } -#endif // DEBUG - return vnStore->VNForMapStore(fgCurMemoryVN[GcHeap], elemTypeEqVN, newValAtArrType); -} + ValueNum fieldValueVN = vnStore->VNForMapSelect(VNK_Liberal, fieldType, fieldMapVN, fieldValueSelectorVN); -ValueNum Compiler::fgValueNumberArrIndexVal(GenTree* tree, VNFuncApp* pFuncApp, ValueNumPair addrXvnp) -{ - assert(vnStore->IsVNHandle(pFuncApp->m_args[0])); - CORINFO_CLASS_HANDLE arrElemTypeEQ = CORINFO_CLASS_HANDLE(vnStore->ConstantValue(pFuncApp->m_args[0])); - ValueNum arrVN = pFuncApp->m_args[1]; - ValueNum inxVN = pFuncApp->m_args[2]; - FieldSeqNode* fldSeq = vnStore->FieldSeqVNToFieldSeq(pFuncApp->m_args[3]); - return fgValueNumberArrIndexVal(tree, arrElemTypeEQ, arrVN, inxVN, addrXvnp, fldSeq); + // 
Finally, account for the struct fields and type mismatches. + var_types loadType = loadTree->TypeGet(); + unsigned loadSize = loadTree->OperIsBlk() ? loadTree->AsBlk()->Size() : genTypeSize(loadTree); + ValueNum loadValueVN = vnStore->VNForLoad(VNK_Liberal, fieldValueVN, fieldSize, loadType, offset, loadSize); + + loadTree->gtVNPair.SetLiberal(loadValueVN); + loadTree->gtVNPair.SetConservative(vnStore->VNForExpr(compCurBB, loadType)); } -ValueNum Compiler::fgValueNumberArrIndexVal(GenTree* tree, - CORINFO_CLASS_HANDLE elemTypeEq, - ValueNum arrVN, - ValueNum inxVN, - ValueNumPair addrXvnp, - FieldSeqNode* fldSeq) +//------------------------------------------------------------------------ +// fgValueNumberFieldStore: Update the current heap state after a store to +// a class/static field. +// +// Arguments: +// storeNode - The ASG node performing the store +// baseAddr - The "base address" of the field (see "GenTree::IsFieldAddr") +// fieldSeq - The field sequence representing the address +// offset - The offset, relative to the field, of the target location +// storeSize - The number of bytes being stored +// value - The value being stored +// +void Compiler::fgValueNumberFieldStore( + GenTree* storeNode, GenTree* baseAddr, FieldSeqNode* fieldSeq, ssize_t offset, unsigned storeSize, ValueNum value) { - assert(tree == nullptr || tree->OperIsIndir()); + assert(fieldSeq != nullptr); - // The VN inputs are required to be non-exceptional values. - assert(arrVN == vnStore->VNNormalValue(arrVN)); - assert(inxVN == vnStore->VNNormalValue(inxVN)); - - var_types elemTyp = DecodeElemType(elemTypeEq); - var_types indType = (tree == nullptr) ? elemTyp : tree->TypeGet(); - ValueNum selectedElem; - unsigned elemWidth = elemTyp == TYP_STRUCT ? 
info.compCompHnd->getClassSize(elemTypeEq) : genTypeSize(elemTyp); - - if ((fldSeq == FieldSeqStore::NotAField()) || (genTypeSize(indType) > elemWidth)) + if (fieldSeq == FieldSeqStore::NotAField()) { - // This doesn't represent a proper array access - JITDUMP(" *** Not a proper arrray access encountered in fgValueNumberArrIndexVal\n"); - - // a new unique value number - selectedElem = vnStore->VNForExpr(compCurBB, indType); - -#ifdef DEBUG - if (verbose) - { - printf(" IND of PtrToArrElem is unique VN " FMT_VN ".\n", selectedElem); - } -#endif // DEBUG + fgMutateGcHeap(storeNode DEBUGARG("NotAField field store")); + return; + } - if (tree != nullptr) - { - tree->gtVNPair = vnStore->VNPWithExc(ValueNumPair(selectedElem, selectedElem), addrXvnp); - } + // Two cases: + // 1) Instance field / "complex" static: heap[field][baseAddr][offset + load size] = value. + // 2) "Simple" static: heap[field][offset + load size] = value. + // + unsigned fieldSize; + var_types fieldType; + ValueNum fieldSelectorVN = vnStore->VNForFieldSelector(fieldSeq->GetFieldHandle(), &fieldType, &fieldSize); + + ValueNum fieldMapVN = ValueNumStore::NoVN; + ValueNum fieldValueSelectorVN = ValueNumStore::NoVN; + if (baseAddr != nullptr) + { + // Construct the "field map" VN. It represents memory state of the first field of all objects + // on the heap. This is our primary map. 
+ fieldMapVN = vnStore->VNForMapSelect(VNK_Liberal, TYP_MEM, fgCurMemoryVN[GcHeap], fieldSelectorVN); + fieldValueSelectorVN = vnStore->VNLiberalNormalValue(baseAddr->gtVNPair); } else { - ValueNum elemTypeEqVN = vnStore->VNForHandle(ssize_t(elemTypeEq), GTF_ICON_CLASS_HDL); - ValueNum hAtArrType = vnStore->VNForMapSelect(VNK_Liberal, TYP_MEM, fgCurMemoryVN[GcHeap], elemTypeEqVN); - ValueNum hAtArrTypeAtArr = vnStore->VNForMapSelect(VNK_Liberal, TYP_MEM, hAtArrType, arrVN); - ValueNum wholeElem = vnStore->VNForMapSelect(VNK_Liberal, elemTyp, hAtArrTypeAtArr, inxVN); - -#ifdef DEBUG - if (verbose) - { - printf(" hAtArrType " FMT_VN " is MapSelect(curGcHeap(" FMT_VN "), ", hAtArrType, fgCurMemoryVN[GcHeap]); - if (elemTyp == TYP_STRUCT) - { - printf("%s[]).\n", eeGetClassName(elemTypeEq)); - } - else - { - printf("%s[]).\n", varTypeName(elemTyp)); - } + fieldMapVN = fgCurMemoryVN[GcHeap]; + fieldValueSelectorVN = fieldSelectorVN; + } - printf(" hAtArrTypeAtArr " FMT_VN " is MapSelect(hAtArrType(" FMT_VN "), arr=" FMT_VN ").\n", - hAtArrTypeAtArr, hAtArrType, arrVN); + ValueNum newFieldValueVN = ValueNumStore::NoVN; + if (vnStore->LoadStoreIsEntire(fieldSize, offset, storeSize)) + { + // For memory locations (as opposed to locals), we do not normalize types. + newFieldValueVN = value; + } + else + { + ValueNum oldFieldValueVN = vnStore->VNForMapSelect(VNK_Liberal, fieldType, fieldMapVN, fieldValueSelectorVN); + newFieldValueVN = vnStore->VNForStore(oldFieldValueVN, fieldSize, offset, storeSize, value); + } - printf(" wholeElem " FMT_VN " is MapSelect(hAtArrTypeAtArr(" FMT_VN "), ind=" FMT_VN ").\n", wholeElem, - hAtArrTypeAtArr, inxVN); - } -#endif // DEBUG + if (newFieldValueVN != ValueNumStore::NoVN) + { + // Construct the new field map... + ValueNum newFieldMapVN = vnStore->VNForMapStore(fieldMapVN, fieldValueSelectorVN, newFieldValueVN); - selectedElem = wholeElem; - size_t elemStructSize = 0; - if (fldSeq) + // ...and a new value for the heap. 
+ ValueNum newHeapVN = ValueNumStore::NoVN; + if (baseAddr != nullptr) { - selectedElem = vnStore->VNApplySelectors(VNK_Liberal, wholeElem, fldSeq, &elemStructSize); - elemTyp = vnStore->TypeOfVN(selectedElem); + newHeapVN = vnStore->VNForMapStore(fgCurMemoryVN[GcHeap], fieldSelectorVN, newFieldMapVN); } - selectedElem = vnStore->VNApplySelectorsTypeCheck(selectedElem, indType, elemStructSize); - selectedElem = vnStore->VNWithExc(selectedElem, addrXvnp.GetLiberal()); - -#ifdef DEBUG - if (verbose && (selectedElem != wholeElem)) + else { - printf(" selectedElem is " FMT_VN " after applying selectors.\n", selectedElem); + newHeapVN = newFieldMapVN; } -#endif // DEBUG - if (tree != nullptr) - { - tree->gtVNPair.SetLiberal(selectedElem); - tree->gtVNPair.SetConservative(vnStore->VNUniqueWithExc(tree->TypeGet(), addrXvnp.GetConservative())); - } + recordGcHeapStore(storeNode, newHeapVN DEBUGARG("StoreField")); + } + else + { + // For out-of-bounds stores, the heap has to be invalidated as other fields may be affected. 
+ fgMutateGcHeap(storeNode DEBUGARG("out-of-bounds store to a field")); } - - return selectedElem; } ValueNum Compiler::fgValueNumberByrefExposedLoad(var_types type, ValueNum pointerVN) @@ -4590,7 +4929,7 @@ ValueNum Compiler::fgValueNumberByrefExposedLoad(var_types type, ValueNum pointe } } -var_types ValueNumStore::TypeOfVN(ValueNum vn) +var_types ValueNumStore::TypeOfVN(ValueNum vn) const { if (vn == NoVN) { @@ -6166,6 +6505,9 @@ void ValueNumStore::vnDump(Compiler* comp, ValueNum vn, bool isPtr) case VNF_MapStore: vnDumpMapStore(comp, &funcApp); break; + case VNF_MapPhysicalStore: + vnDumpMapPhysicalStore(comp, &funcApp); + break; case VNF_ValWithExc: vnDumpValWithExc(comp, &funcApp); break; @@ -6181,6 +6523,9 @@ void ValueNumStore::vnDump(Compiler* comp, ValueNum vn, bool isPtr) case VNF_CastOvf: vnDumpCast(comp, vn); break; + case VNF_BitCast: + vnDumpBitCast(comp, &funcApp); + break; case VNF_ZeroObj: vnDumpZeroObj(comp, &funcApp); break; @@ -6297,6 +6642,38 @@ void ValueNumStore::vnDumpMapStore(Compiler* comp, VNFuncApp* mapStore) } } +void ValueNumStore::vnDumpPhysicalSelector(ValueNum selector) +{ + unsigned size; + unsigned offset = DecodePhysicalSelector(selector, &size); + + if (size == 1) + { + printf("[%u]", offset); + } + else + { + printf("[%u:%u]", offset, offset + size - 1); + } +} + +void ValueNumStore::vnDumpMapPhysicalStore(Compiler* comp, VNFuncApp* mapPhysicalStore) +{ + ValueNum mapVN = mapPhysicalStore->m_args[0]; + ValueNum selector = mapPhysicalStore->m_args[1]; + ValueNum valueVN = mapPhysicalStore->m_args[2]; + + unsigned size; + unsigned offset = DecodePhysicalSelector(selector, &size); + unsigned endOffset = offset + size; + + comp->vnPrint(mapVN, 0); + vnDumpPhysicalSelector(selector); + printf(" := "); + comp->vnPrint(valueVN, 0); + printf("]"); +} + void ValueNumStore::vnDumpMemOpaque(Compiler* comp, VNFuncApp* memOpaque) { assert(memOpaque->m_func == VNF_MemOpaque); // Precondition. 
@@ -6360,6 +6737,16 @@ void ValueNumStore::vnDumpCast(Compiler* comp, ValueNum castVN) } } +void ValueNumStore::vnDumpBitCast(Compiler* comp, VNFuncApp* bitCast) +{ + var_types srcType = TypeOfVN(bitCast->m_args[0]); + var_types castToType = static_cast<var_types>(ConstantValue<int>(bitCast->m_args[1])); + + printf("BitCast<%s <- %s>(", varTypeName(castToType), varTypeName(srcType)); + comp->vnPrint(bitCast->m_args[0], 0); + printf(")"); +} + void ValueNumStore::vnDumpZeroObj(Compiler* comp, VNFuncApp* zeroObj) { printf("ZeroObj("); @@ -6528,12 +6915,11 @@ const char* ValueNumStore::VNFuncNameArr[] = { } static const char* s_reservedNameArr[] = { - "$VN.Recursive", // -2 RecursiveVN - "$VN.No", // -1 NoVN - "$VN.Null", // 0 VNForNull() - "$VN.ReadOnlyHeap", // 1 VNForROH() - "$VN.Void", // 2 VNForVoid() - "$VN.EmptyExcSet" // 3 VNForEmptyExcSet() + "$VN.Recursive", // -2 RecursiveVN + "$VN.No", // -1 NoVN + "$VN.Null", // 0 VNForNull() + "$VN.Void", // 1 VNForVoid() + "$VN.EmptyExcSet" // 2 VNForEmptyExcSet() }; // Returns the string name of "vn" when it is a reserved value number, nullptr otherwise @@ -7651,6 +8037,7 @@ void Compiler::fgValueNumberAssignment(GenTreeOp* tree) ValueNumPair rhsExcSet; ValueNumPair rhsVNPair; vnStore->VNPUnpackExc(rhs->gtVNPair, &rhsVNPair, &rhsExcSet); + assert(rhsVNPair.BothDefined()); // Is the type being stored different from the type computed by the rhs? if (rhs->TypeGet() != lhs->TypeGet()) @@ -7680,106 +8067,29 @@ void Compiler::fgValueNumberAssignment(GenTreeOp* tree) { case GT_LCL_VAR: { - GenTreeLclVarCommon* lcl = lhs->AsLclVarCommon(); - unsigned lclDefSsaNum = GetSsaNumForLocalVarDef(lcl); - - // Should not have been recorded as updating the GC heap. - assert(!GetMemorySsaMap(GcHeap)->Lookup(tree)); - - if (lclDefSsaNum != SsaConfig::RESERVED_SSA_NUM) - { - // Should not have been recorded as updating ByrefExposed mem.
- assert(!GetMemorySsaMap(ByrefExposed)->Lookup(tree)); - assert(rhsVNPair.BothDefined()); - - lvaTable[lcl->GetLclNum()].GetPerSsaData(lclDefSsaNum)->m_vnPair = rhsVNPair; - - JITDUMP("Tree [%06u] assigned VN to local var V%02u/%d: ", dspTreeID(tree), lcl->GetLclNum(), - lclDefSsaNum); - JITDUMPEXEC(vnpPrint(rhsVNPair, 1)); - JITDUMP("\n"); - } - else if (lvaVarAddrExposed(lcl->GetLclNum())) - { - // We could use MapStore here and MapSelect on reads of address-exposed locals - // (using the local nums as selectors) to get e.g. propagation of values - // through address-taken locals in regions of code with no calls or byref - // writes. - // For now, just use a new opaque VN. - ValueNum heapVN = vnStore->VNForExpr(compCurBB, TYP_HEAP); - recordAddressExposedLocalStore(tree, heapVN DEBUGARG("local assign")); - } - else - { - JITDUMP("Tree [%06u] assigns to non-address-taken local var V%02u; excluded from SSA, so value not" - "tracked.\n", - dspTreeID(tree), lcl->GetLclNum()); - } + GenTreeLclVarCommon* lcl = lhs->AsLclVarCommon(); + fgValueNumberLocalStore(tree, lcl, 0, lvaLclExactSize(lcl->GetLclNum()), rhsVNPair, /* normalize */ false); } break; + case GT_LCL_FLD: { - GenTreeLclFld* lclFld = lhs->AsLclFld(); - unsigned lclDefSsaNum = GetSsaNumForLocalVarDef(lclFld); - - // Should not have been recorded as updating the GC heap. - assert(!GetMemorySsaMap(GcHeap)->Lookup(tree)); - - if (lclDefSsaNum != SsaConfig::RESERVED_SSA_NUM) - { - ValueNumPair newLhsVNPair; - // Is this a full definition? - if ((lclFld->gtFlags & GTF_VAR_USEASG) == 0) - { - assert(!lclFld->IsPartialLclFld(this)); - assert(rhsVNPair.GetLiberal() != ValueNumStore::NoVN); - newLhsVNPair = rhsVNPair; - } - else - { - // We should never have a null field sequence here. - assert(lclFld->GetFieldSeq() != nullptr); - if (lclFld->GetFieldSeq() == FieldSeqStore::NotAField()) - { - // We don't know what field this represents. 
Assign a new VN to the whole variable - // (since we may be writing to an unknown portion of it.) - newLhsVNPair.SetBoth(vnStore->VNForExpr(compCurBB, lvaGetActualType(lclFld->GetLclNum()))); - } - else - { - // We do know the field sequence. - // The "lclFld" node will be labeled with the SSA number of its "use" identity - // (we looked in a side table above for its "def" identity). Look up that value. - ValueNumPair oldLhsVNPair = - lvaTable[lclFld->GetLclNum()].GetPerSsaData(lclFld->GetSsaNum())->m_vnPair; - newLhsVNPair = vnStore->VNPairApplySelectorsAssign(oldLhsVNPair, lclFld->GetFieldSeq(), - rhsVNPair, // Pre-value. - lclFld->TypeGet()); - } - } - - lvaTable[lclFld->GetLclNum()].GetPerSsaData(lclDefSsaNum)->m_vnPair = newLhsVNPair; + GenTreeLclFld* lclFld = lhs->AsLclFld(); + unsigned storeOffset = lclFld->GetLclOffs(); + unsigned storeSize = lclFld->GetSize(); - JITDUMP("Tree [%06u] assigned VN to local var V%02u/%d: ", dspTreeID(tree), lclFld->GetLclNum(), - lclDefSsaNum); - JITDUMPEXEC(vnpPrint(newLhsVNPair, 1)); - JITDUMP("\n"); - } - else if (lvaVarAddrExposed(lclFld->GetLclNum())) - { - // This side-effects ByrefExposed. Just use a new opaque VN. - // As with GT_LCL_VAR, we could probably use MapStore here and MapSelect at corresponding - // loads, but to do so would have to identify the subset of address-exposed locals - // whose fields can be disambiguated. - ValueNum heapVN = vnStore->VNForExpr(compCurBB, TYP_HEAP); - recordAddressExposedLocalStore(tree, heapVN DEBUGARG("local field assign")); - } - else + // TODO-PhysicalVN: with the physical VN scheme, we no longer need the field sequence checks. + if ((lclFld->GetFieldSeq() == FieldSeqStore::NotAField()) && ((lclFld->gtFlags & GTF_VAR_USEASG) != 0)) { - JITDUMP("Tree [%06u] assigns to non-address-taken local var V%02u; excluded from SSA, so value not" - "tracked.\n", - dspTreeID(tree), lclFld->GetLclNum()); + // Invalidate the whole local. 
+ rhsVNPair.SetBoth(vnStore->VNForExpr(compCurBB, lvaGetDesc(lclFld)->TypeGet())); + storeOffset = 0; + storeSize = lvaLclExactSize(lclFld->GetLclNum()); } + + // TODO-PhysicalVN: remove this quirk, it was added to avoid diffs. + bool normalize = ((lclFld->gtFlags & GTF_VAR_USEASG) != 0); + fgValueNumberLocalStore(tree, lclFld, storeOffset, storeSize, rhsVNPair, normalize); } break; @@ -7799,249 +8109,69 @@ void Compiler::fgValueNumberAssignment(GenTreeOp* tree) tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, lhs->TypeGet())); } - GenTree* arg = lhs->AsOp()->gtOp1; - - // Indicates whether the argument of the IND is the address of a local. - bool wasLocal = false; - + GenTree* arg = lhs->AsOp()->gtOp1; VNFuncApp funcApp; - ValueNum argVN = arg->gtVNPair.GetLiberal(); + ValueNum argVN = arg->gtVNPair.GetLiberal(); + bool argIsVNFunc = vnStore->GetVNFunc(vnStore->VNNormalValue(argVN), &funcApp); - bool argIsVNFunc = vnStore->GetVNFunc(vnStore->VNNormalValue(argVN), &funcApp); + GenTreeLclVarCommon* lclVarTree = nullptr; + bool isEntire = false; + ssize_t offset = 0; + unsigned storeSize = lhs->AsIndir()->Size(); + GenTree* baseAddr = nullptr; + FieldSeqNode* fldSeq = nullptr; - // Is this an assignment to a (field of, perhaps) a local? - // If it is a PtrToLoc, lib and cons VNs will be the same. - if (argIsVNFunc) + if (argIsVNFunc && (funcApp.m_func == VNF_PtrToStatic)) { - if (funcApp.m_func == VNF_PtrToLoc) - { - assert(arg->gtVNPair.BothEqual()); // If it's a PtrToLoc, lib/cons shouldn't differ. - assert(vnStore->IsVNConstant(funcApp.m_args[0])); - unsigned lclNum = vnStore->ConstantValue(funcApp.m_args[0]); + baseAddr = nullptr; // All VNF_PtrToStatic statics are currently "simple". 
+ fldSeq = vnStore->FieldSeqVNToFieldSeq(funcApp.m_args[1]); + offset = vnStore->ConstantValue(funcApp.m_args[2]); - wasLocal = true; - - bool wasInSsa = false; - if (lvaInSsa(lclNum)) - { - FieldSeqNode* fieldSeq = vnStore->FieldSeqVNToFieldSeq(funcApp.m_args[1]); - - // Either "arg" is the address of (part of) a local itself, or else we have - // a "rogue" PtrToLoc, one that should have made the local in question - // address-exposed. Assert on that. - GenTreeLclVarCommon* lclVarTree = nullptr; - bool isEntire = false; - - if (arg->DefinesLocalAddr(this, genTypeSize(lhs->TypeGet()), &lclVarTree, &isEntire) && - lclVarTree->HasSsaName()) - { - // The local #'s should agree. - assert(lclNum == lclVarTree->GetLclNum()); - - if (fieldSeq == FieldSeqStore::NotAField()) - { - assert(!isEntire && "did not expect an entire NotAField write."); - // We don't know where we're storing, so give the local a new, unique VN. - // Do this by considering it an "entire" assignment, with an unknown RHS. - isEntire = true; - rhsVNPair.SetBoth(vnStore->VNForExpr(compCurBB, lclVarTree->TypeGet())); - } - else if ((fieldSeq == nullptr) && !isEntire) - { - // It is a partial store of a LCL_VAR without using LCL_FLD. - // Generate a unique VN. - isEntire = true; - rhsVNPair.SetBoth(vnStore->VNForExpr(compCurBB, lclVarTree->TypeGet())); - } - - ValueNumPair newLhsVNPair; - if (isEntire) - { - newLhsVNPair = rhsVNPair; - } - else - { - // Don't use the lclVarTree's VN: if it's a local field, it will - // already be dereferenced by it's field sequence. 
- ValueNumPair oldLhsVNPair = - lvaTable[lclVarTree->GetLclNum()].GetPerSsaData(lclVarTree->GetSsaNum())->m_vnPair; - newLhsVNPair = vnStore->VNPairApplySelectorsAssign(oldLhsVNPair, fieldSeq, rhsVNPair, - lhs->TypeGet()); - } - - unsigned lclDefSsaNum = GetSsaNumForLocalVarDef(lclVarTree); - - if (lclDefSsaNum != SsaConfig::RESERVED_SSA_NUM) - { - lvaTable[lclNum].GetPerSsaData(lclDefSsaNum)->m_vnPair = newLhsVNPair; - wasInSsa = true; -#ifdef DEBUG - if (verbose) - { - printf("Tree "); - Compiler::printTreeID(tree); - printf(" assigned VN to local var V%02u/%d: VN ", lclNum, lclDefSsaNum); - vnpPrint(newLhsVNPair, 1); - printf("\n"); - } -#endif // DEBUG - } - } - else - { - unreached(); // "Rogue" PtrToLoc, as discussed above. - } - } - - if (!wasInSsa && lvaVarAddrExposed(lclNum)) - { - // Need to record the effect on ByrefExposed. - // We could use MapStore here and MapSelect on reads of address-exposed locals - // (using the local nums as selectors) to get e.g. propagation of values - // through address-taken locals in regions of code with no calls or byref - // writes. - // For now, just use a new opaque VN. - ValueNum heapVN = vnStore->VNForExpr(compCurBB, TYP_HEAP); - recordAddressExposedLocalStore(tree, heapVN DEBUGARG("PtrToLoc indir")); - } - } + fgValueNumberFieldStore(tree, baseAddr, fldSeq, offset, storeSize, rhsVNPair.GetLiberal()); } - - // Was the argument of the GT_IND the address of a local, handled above? 
- if (!wasLocal) + else if (argIsVNFunc && (funcApp.m_func == VNF_PtrToArrElem)) + { + fgValueNumberArrayElemStore(tree, &funcApp, storeSize, rhsVNPair.GetLiberal()); + } + else if (arg->IsFieldAddr(this, &baseAddr, &fldSeq, &offset)) { - GenTree* baseAddr = nullptr; - FieldSeqNode* fldSeq = nullptr; + assert(fldSeq != FieldSeqStore::NotAField()); + fgValueNumberFieldStore(tree, baseAddr, fldSeq, offset, storeSize, rhsVNPair.GetLiberal()); + } + else if (arg->DefinesLocalAddr(this, storeSize, &lclVarTree, &isEntire, &offset)) + { + unsigned lclNum = lclVarTree->GetLclNum(); + fldSeq = FieldSeqStore::NotAField(); - if (argIsVNFunc && funcApp.m_func == VNF_PtrToStatic) + if (argIsVNFunc && (funcApp.m_func == VNF_PtrToLoc)) { - FieldSeqNode* fldSeq = vnStore->FieldSeqVNToFieldSeq(funcApp.m_args[1]); - assert(fldSeq != nullptr); // We should never see an empty sequence here. - - if (fldSeq != FieldSeqStore::NotAField()) - { - ValueNum newHeapVN = vnStore->VNApplySelectorsAssign(VNK_Liberal, fgCurMemoryVN[GcHeap], fldSeq, - rhsVNPair.GetLiberal(), lhs->TypeGet()); - recordGcHeapStore(tree, newHeapVN DEBUGARG("static field store")); - } - else - { - fgMutateGcHeap(tree DEBUGARG("indirect store at NotAField PtrToStatic address")); - } + assert(arg->gtVNPair.BothEqual()); // If it's a PtrToLoc, lib/cons shouldn't differ. + assert(lclNum == vnStore->ConstantValue(funcApp.m_args[0])); + fldSeq = vnStore->FieldSeqVNToFieldSeq(funcApp.m_args[1]); } - // Is the LHS an array index expression? 
- else if (argIsVNFunc && funcApp.m_func == VNF_PtrToArrElem) - { - CORINFO_CLASS_HANDLE elemTypeEq = - CORINFO_CLASS_HANDLE(vnStore->ConstantValue(funcApp.m_args[0])); - ValueNum arrVN = funcApp.m_args[1]; - ValueNum inxVN = funcApp.m_args[2]; - FieldSeqNode* fldSeq = vnStore->FieldSeqVNToFieldSeq(funcApp.m_args[3]); -#ifdef DEBUG - if (verbose) - { - printf("Tree "); - Compiler::printTreeID(tree); - printf(" assigns to an array element:\n"); - } -#endif // DEBUG - ValueNum heapVN = fgValueNumberArrIndexAssign(elemTypeEq, arrVN, inxVN, fldSeq, - rhsVNPair.GetLiberal(), lhs->TypeGet()); - recordGcHeapStore(tree, heapVN DEBUGARG("ArrIndexAssign")); - } - else if (arg->IsFieldAddr(this, &baseAddr, &fldSeq)) + // TODO-PhysicalVN: with the physical VN scheme, we no longer need the field sequence checks. + if ((fldSeq == FieldSeqStore::NotAField()) || ((fldSeq == nullptr) && !isEntire)) { - assert((fldSeq != nullptr) && (fldSeq != FieldSeqStore::NotAField())); - - // The value number from the rhs of the assignment - ValueNum storeVal = rhsVNPair.GetLiberal(); - ValueNum newHeapVN = ValueNumStore::NoVN; - - // We will check that the final field in the sequence matches 'indType'. - var_types indType = lhs->TypeGet(); - - if (baseAddr != nullptr) - { - // Instance field / "complex" static: heap[field][baseAddr][struct fields...] = storeVal. - - var_types firstFieldType; - ValueNum firstFieldSelectorVN = - vnStore->VNForFieldSelector(fldSeq->GetFieldHandle(), &firstFieldType); - - // Construct the "field map" VN. It represents memory state of the first field - // of all objects on the heap. This is our primary map. 
- ValueNum fldMapVN = - vnStore->VNForMapSelect(VNK_Liberal, TYP_MEM, fgCurMemoryVN[GcHeap], firstFieldSelectorVN); - - ValueNum firstFieldValueSelectorVN = vnStore->VNLiberalNormalValue(baseAddr->gtVNPair); - - ValueNum newFirstFieldValueVN = ValueNumStore::NoVN; - // Optimization: avoid traversting the maps for the value of the first field if - // we do not need it, which is the case if the rest of the field sequence is empty. - if (fldSeq->GetNext() == nullptr) - { - newFirstFieldValueVN = vnStore->VNApplySelectorsAssignTypeCoerce(storeVal, indType); - } - else - { - // Construct the ValueNumber for fldMap[baseAddr]. This (struct) - // map represents the specific field we're looking to store to. - ValueNum firstFieldValueVN = vnStore->VNForMapSelect(VNK_Liberal, firstFieldType, fldMapVN, - firstFieldValueSelectorVN); - - // Construct the maps updating the struct fields in the sequence. - newFirstFieldValueVN = - vnStore->VNApplySelectorsAssign(VNK_Liberal, firstFieldValueVN, fldSeq->GetNext(), - storeVal, indType); - } - - // Finally, construct the new field map... - ValueNum newFldMapVN = - vnStore->VNForMapStore(fldMapVN, firstFieldValueSelectorVN, newFirstFieldValueVN); - - // ...and a new value for the heap. - newHeapVN = vnStore->VNForMapStore(fgCurMemoryVN[GcHeap], firstFieldSelectorVN, newFldMapVN); - } - else - { - // "Simple" static: heap[field][struct fields...] = storeVal. - newHeapVN = vnStore->VNApplySelectorsAssign(VNK_Liberal, fgCurMemoryVN[GcHeap], fldSeq, - storeVal, indType); - } - - // Update the GcHeap value. 
- recordGcHeapStore(tree, newHeapVN DEBUGARG("StoreField")); + rhsVNPair.SetBoth(vnStore->VNForExpr(compCurBB, lclVarTree->TypeGet())); + offset = 0; + storeSize = lvaLclExactSize(lclVarTree->GetLclNum()); } - else - { - GenTreeLclVarCommon* lclVarTree = nullptr; - bool isLocal = tree->DefinesLocal(this, &lclVarTree); - if (isLocal && lvaVarAddrExposed(lclVarTree->GetLclNum())) - { - // Store to address-exposed local; need to record the effect on ByrefExposed. - // We could use MapStore here and MapSelect on reads of address-exposed locals - // (using the local nums as selectors) to get e.g. propagation of values - // through address-taken locals in regions of code with no calls or byref - // writes. - // For now, just use a new opaque VN. - ValueNum memoryVN = vnStore->VNForExpr(compCurBB, TYP_HEAP); - recordAddressExposedLocalStore(tree, memoryVN DEBUGARG("PtrToLoc indir")); - } - else if (!isLocal) - { - // If it doesn't define a local, then it might update GcHeap/ByrefExposed. - // For the new ByrefExposed VN, we could use an operator here like - // VNF_ByrefExposedStore that carries the VNs of the pointer and RHS, then - // at byref loads if the current ByrefExposed VN happens to be - // VNF_ByrefExposedStore with the same pointer VN, we could propagate the - // VN from the RHS to the VN for the load. This would e.g. allow tracking - // values through assignments to out params. For now, just model this - // as an opaque GcHeap/ByrefExposed mutation. - fgMutateGcHeap(tree DEBUGARG("assign-of-IND")); - } - } + fgValueNumberLocalStore(tree, lclVarTree, offset, storeSize, rhsVNPair); + } + else + { + assert(!tree->DefinesLocal(this, &lclVarTree)); + // If it doesn't define a local, then it might update GcHeap/ByrefExposed. 
+ // For the new ByrefExposed VN, we could use an operator here like + // VNF_ByrefExposedStore that carries the VNs of the pointer and RHS, then + // at byref loads if the current ByrefExposed VN happens to be + // VNF_ByrefExposedStore with the same pointer VN, we could propagate the + // VN from the RHS to the VN for the load. This would e.g. allow tracking + // values through assignments to out params. For now, just model this + // as an opaque GcHeap/ByrefExposed mutation. + fgMutateGcHeap(tree DEBUGARG("assign-of-IND")); } // We don't actually evaluate an IND on the LHS, so give it the Void value. @@ -8074,9 +8204,10 @@ void Compiler::fgValueNumberBlockAssignment(GenTree* tree) GenTree* lhs = tree->gtGetOp1(); GenTree* rhs = tree->gtGetOp2(); - GenTreeLclVarCommon* lclVarTree; - bool isEntire; - if (tree->DefinesLocal(this, &lclVarTree, &isEntire)) + GenTreeLclVarCommon* lclVarTree = nullptr; + bool isEntire = false; + ssize_t offset = 0; + if (tree->DefinesLocal(this, &lclVarTree, &isEntire, &offset)) { assert(lclVarTree->gtFlags & GTF_VAR_DEF); // Should not have been recorded as updating the GC heap. @@ -8090,88 +8221,78 @@ void Compiler::fgValueNumberBlockAssignment(GenTree* tree) // SSA names in which to store VN's on defs. We'll yield unique VN's when we read from them. if (lclDefSsaNum != SsaConfig::RESERVED_SSA_NUM) { - FieldSeqNode* lhsFldSeq = nullptr; + unsigned lhsLclSize = lvaLclExactSize(lhsLclNum); - if (lhs->IsLocalExpr(this, &lclVarTree, &lhsFldSeq)) + unsigned storeSize; + FieldSeqNode* lhsFldSeq; + if (lhs->OperIs(GT_LCL_VAR)) + { + storeSize = lhsLclSize; + lhsFldSeq = nullptr; + } + else if (lhs->OperIs(GT_LCL_FLD)) { - noway_assert(lclVarTree->GetLclNum() == lhsLclNum); + storeSize = lhs->AsLclFld()->GetSize(); + lhsFldSeq = lhs->AsLclFld()->GetFieldSeq(); } else { - GenTree* lhsAddr = lhs->AsIndir()->Addr(); + storeSize = lhs->AsIndir()->Size(); - // For addr-of-local expressions, lib/cons shouldn't matter. 
- assert(lhsAddr->gtVNPair.BothEqual()); - ValueNum lhsAddrVN = lhsAddr->GetVN(VNK_Liberal); + ValueNumPair lhsAddrVNP = lhs->AsIndir()->Addr()->gtVNPair; + VNFuncApp lhsAddrFunc; + bool lhsAddrIsVNFunc = vnStore->GetVNFunc(lhsAddrVNP.GetLiberal(), &lhsAddrFunc); + assert(lhsAddrIsVNFunc && (lhsAddrFunc.m_func == VNF_PtrToLoc) && lhsAddrVNP.BothEqual()); - // Unpack the PtrToLoc value number of the address. - assert(vnStore->IsVNFunc(lhsAddrVN)); - - VNFuncApp lhsAddrFuncApp; - vnStore->GetVNFunc(lhsAddrVN, &lhsAddrFuncApp); - - assert(lhsAddrFuncApp.m_func == VNF_PtrToLoc); - assert(vnStore->IsVNConstant(lhsAddrFuncApp.m_args[0]) && - vnStore->ConstantValue(lhsAddrFuncApp.m_args[0]) == lhsLclNum); - - lhsFldSeq = vnStore->FieldSeqVNToFieldSeq(lhsAddrFuncApp.m_args[1]); + lhsFldSeq = vnStore->FieldSeqVNToFieldSeq(lhsAddrFunc.m_args[1]); } - bool isNewUniq = false; - ValueNumPair newLhsLclVNPair = ValueNumPair(); + ValueNumPair rhsVNPair = ValueNumPair(); if (tree->OperIsInitBlkOp()) { - ValueNum lclVarVN = ValueNumStore::NoVN; + ValueNum initObjVN = ValueNumStore::NoVN; if (isEntire && rhs->IsIntegralConst(0)) { // Note that it is possible to see pretty much any kind of type for the local // (not just TYP_STRUCT) here because of the ASG(BLK(ADDR(LCL_VAR/FLD)), 0) form. - lclVarVN = (lhsVarDsc->TypeGet() == TYP_STRUCT) ? vnStore->VNForZeroObj(lhsVarDsc->GetStructHnd()) - : vnStore->VNZeroForType(lhsVarDsc->TypeGet()); + initObjVN = (lhsVarDsc->TypeGet() == TYP_STRUCT) ? vnStore->VNForZeroObj(lhsVarDsc->GetStructHnd()) + : vnStore->VNZeroForType(lhsVarDsc->TypeGet()); } else { // Non-zero block init is very rare so we'll use a simple, unique VN here. - lclVarVN = vnStore->VNForExpr(compCurBB, lhsVarDsc->TypeGet()); - isNewUniq = true; + initObjVN = vnStore->VNForExpr(compCurBB, lhsVarDsc->TypeGet()); + + // TODO-PhysicalVN: remove this pessimization, it was added to avoid diffs. + // There is no need to invalidate the whole local. 
+ offset = 0; + storeSize = lhsLclSize; } - newLhsLclVNPair.SetBoth(lclVarVN); + rhsVNPair.SetBoth(initObjVN); } else { assert(tree->OperIsCopyBlkOp()); - if (fgValueNumberBlockAssignmentTypeCheck(lhsVarDsc, lhsFldSeq, rhs)) + rhsVNPair = vnStore->VNPNormalPair(rhs->gtVNPair); + + // TODO-PhysicalVN: with the physical VN scheme, we no longer need this check. + if (!fgValueNumberBlockAssignmentTypeCheck(lhsVarDsc, lhsFldSeq, rhs)) { - ValueNumPair rhsVNPair = vnStore->VNPNormalPair(rhs->gtVNPair); - ValueNumPair oldLhsLclVNPair = lhsVarDsc->GetPerSsaData(lclVarTree->GetSsaNum())->m_vnPair; - newLhsLclVNPair = - vnStore->VNPairApplySelectorsAssign(oldLhsLclVNPair, lhsFldSeq, rhsVNPair, lhs->TypeGet()); + // Invalidate the whole local. + rhsVNPair.SetBoth(vnStore->VNForExpr(compCurBB, lhsVarDsc->TypeGet())); + offset = 0; + storeSize = lhsLclSize; } - else + // TODO-PhysicalVN: remove this quirk, it was added to avoid diffs. + else if (lhs->TypeIs(TYP_STRUCT) && (vnStore->TypeOfVN(rhsVNPair.GetLiberal()) != TYP_STRUCT)) { - newLhsLclVNPair.SetBoth(vnStore->VNForExpr(compCurBB, lhsVarDsc->TypeGet())); - isNewUniq = true; + rhsVNPair.SetBoth(vnStore->VNForExpr(compCurBB, lhsVarDsc->TypeGet())); } } - lhsVarDsc->GetPerSsaData(lclDefSsaNum)->m_vnPair = newLhsLclVNPair; - -#ifdef DEBUG - if (verbose) - { - printf("Tree "); - Compiler::printTreeID(tree); - printf(" assigned VN to local var V%02u/%d: ", lhsLclNum, lclDefSsaNum); - if (isNewUniq) - { - printf("new uniq "); - } - vnpPrint(newLhsLclVNPair, 1); - printf("\n"); - } -#endif // DEBUG + fgValueNumberLocalStore(tree, lclVarTree, offset, storeSize, rhsVNPair); } else if (lclVarTree->HasSsaName()) { @@ -8226,35 +8347,16 @@ void Compiler::fgValueNumberBlockAssignment(GenTree* tree) // Whether "src"'s exact type matches that of the destination location. 
// // Notes: -// Currently this method only handles local destinations, it should be expanded to support more -// locations (static/instance fields, array elements) once/if "fgValueNumberBlockAssignment" -// supports them. +// TODO-PhysicalVN: with the physical VN scheme, this method is no longer needed. // bool Compiler::fgValueNumberBlockAssignmentTypeCheck(LclVarDsc* dstVarDsc, FieldSeqNode* dstFldSeq, GenTree* src) { if (dstFldSeq == FieldSeqStore::NotAField()) { - // We don't have proper field sequence information for the lhs - assume arbitrary aliasing. JITDUMP(" *** Missing field sequence info for Dst/LHS of COPYBLK\n"); return false; } - // With unsafe code or nested structs, we can end up with IR that has - // mismatched struct types on the LHS and RHS. We need to maintain the - // invariant that a node's VN corresponds exactly to its type. Failure - // to do so is a correctness problem. For example: - // - // S1 s1 = { ... }; // s1 = map - // S1.F0 = 0; // s1 = map[F0 := 0] - // S2 s2 = s1; // s2 = map[F0 := 0] (absent below checks) - // s2.F1 = 1; // s2 = map[F0 := 0][F1 := 1] - // s1 = s2; // s1 = map[F0 := 0][F1 := 1] - // - // int r = s1.F0; // map[F0 := 0][F1 := 1][F0] => map[F0 := 0][F0] => 0 - // - // If F1 and F0 physically alias (exist at the same offset, say), the above - // represents an incorrect optimization. - var_types dstLocationType = TYP_UNDEF; CORINFO_CLASS_HANDLE dstLocationStructHnd = NO_CLASS_HANDLE; if (dstFldSeq == nullptr) @@ -8283,18 +8385,12 @@ bool Compiler::fgValueNumberBlockAssignmentTypeCheck(LclVarDsc* dstVarDsc, Field return false; } - // They're equal, and they're primitives. Allow, for now. TYP_SIMD is tentatively - // allowed here as well as, currently, there are no two vector types with public - // fields both that could reasonably alias each other. + // They're equal, and they're primitives. Allow. if (dstLocationType != TYP_STRUCT) { return true; } - // Figure out what the source's type really is. 
Note that this will miss - // struct fields of struct locals currently ("src" for them is an IND(struct)). - // Note as well that we're relying on the invariant that "node type == node's - // VN type" here (it would be expensive to recover the handle from "src"'s VN). CORINFO_CLASS_HANDLE srcValueStructHnd = gtGetStructHandleIfPresent(src); if (srcValueStructHnd == NO_CLASS_HANDLE) { @@ -8395,7 +8491,7 @@ void Compiler::fgValueNumberTree(GenTree* tree) FieldSeqNode* zeroOffsetFldSeq = nullptr; if ((typ == TYP_BYREF) && GetZeroOffsetFieldMap()->Lookup(tree, &zeroOffsetFldSeq)) { - ValueNum addrExtended = vnStore->ExtendPtrVN(lcl, zeroOffsetFldSeq); + ValueNum addrExtended = vnStore->ExtendPtrVN(lcl, zeroOffsetFldSeq, 0); if (addrExtended != ValueNumStore::NoVN) { assert(lcl->gtVNPair.BothEqual()); @@ -8415,32 +8511,32 @@ void Compiler::fgValueNumberTree(GenTree* tree) case GT_LCL_FLD: { GenTreeLclFld* lclFld = tree->AsLclFld(); - assert(!lvaInSsa(lclFld->GetLclNum()) || (lclFld->GetFieldSeq() != nullptr)); // If this is a (full or partial) def we skip; it will be handled as part of the assignment. if ((lclFld->gtFlags & GTF_VAR_DEF) == 0) { - unsigned ssaNum = lclFld->GetSsaNum(); - LclVarDsc* varDsc = lvaGetDesc(lclFld); + unsigned lclNum = lclFld->GetLclNum(); - var_types indType = tree->TypeGet(); + // TODO-ADDR: delete the "GetSize" check below once TYP_STRUCT LCL_FLD is supported. + // TODO-PhysicalVN: with the physical VN scheme, we no longer need the field sequence checks. if ((lclFld->GetFieldSeq() == FieldSeqStore::NotAField()) || !lvaInSsa(lclFld->GetLclNum()) || - !lclFld->HasSsaName()) + !lclFld->HasSsaName() || (lclFld->GetSize() == 0)) { - // This doesn't represent a proper field access or it's a struct - // with overlapping fields that is hard to reason about; return a new unique VN. 
- tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, indType)); + lclFld->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, lclFld->TypeGet())); } else { - ValueNumPair lclVNPair = varDsc->GetPerSsaData(ssaNum)->m_vnPair; - tree->gtVNPair = vnStore->VNPairApplySelectors(lclVNPair, lclFld->GetFieldSeq(), indType); + LclVarDsc* varDsc = lvaGetDesc(lclNum); + ValueNumPair lclVarValue = varDsc->GetPerSsaData(lclFld->GetSsaNum())->m_vnPair; + lclFld->gtVNPair = + vnStore->VNPairForLoad(lclVarValue, lvaLclExactSize(lclNum), lclFld->TypeGet(), + lclFld->GetLclOffs(), lclFld->GetSize()); // If we have byref field, we may have a zero-offset sequence to add. FieldSeqNode* zeroOffsetFldSeq = nullptr; if ((typ == TYP_BYREF) && GetZeroOffsetFieldMap()->Lookup(lclFld, &zeroOffsetFldSeq)) { - ValueNum addrExtended = vnStore->ExtendPtrVN(lclFld, zeroOffsetFldSeq); + ValueNum addrExtended = vnStore->ExtendPtrVN(lclFld, zeroOffsetFldSeq, 0); if (addrExtended != ValueNumStore::NoVN) { lclFld->gtVNPair.SetBoth(addrExtended); @@ -8547,7 +8643,7 @@ void Compiler::fgValueNumberTree(GenTree* tree) FieldSeqNode* zeroOffsetFieldSeq = nullptr; if (GetZeroOffsetFieldMap()->Lookup(tree, &zeroOffsetFieldSeq)) { - ValueNum addrExtended = vnStore->ExtendPtrVN(arg->AsIndir()->Addr(), zeroOffsetFieldSeq); + ValueNum addrExtended = vnStore->ExtendPtrVN(arg->AsIndir()->Addr(), zeroOffsetFieldSeq, 0); if (addrExtended != ValueNumStore::NoVN) { // We don't care about lib/cons differences for addresses. @@ -8597,7 +8693,6 @@ void Compiler::fgValueNumberTree(GenTree* tree) vnStore->VNPUnpackExc(addr->gtVNPair, &addrNvnp, &addrXvnp); // Is the dereference immutable? If so, model it as referencing the read-only heap. - // TODO-VNTypes: this code needs to encode the types of the indirections. 
if (tree->gtFlags & GTF_IND_INVARIANT) { assert(!isVolatile); // We don't expect both volatile and invariant @@ -8629,25 +8724,18 @@ void Compiler::fgValueNumberTree(GenTree* tree) assert(addrNvnp.BothEqual() && (addrXvnp == vnStore->VNPForEmptyExcSet())); ValueNum boxAddrVN = addrNvnp.GetLiberal(); ValueNum fieldSeqVN = vnStore->VNForFieldSeq(nullptr); + ValueNum offsetVN = vnStore->VNForIntPtrCon(-TARGET_POINTER_SIZE); ValueNum staticAddrVN = - vnStore->VNForFunc(tree->TypeGet(), VNF_PtrToStatic, boxAddrVN, fieldSeqVN); + vnStore->VNForFunc(tree->TypeGet(), VNF_PtrToStatic, boxAddrVN, fieldSeqVN, offsetVN); tree->gtVNPair = ValueNumPair(staticAddrVN, staticAddrVN); } - // Is this invariant indirect expected to always return a non-null value? - // TODO-VNTypes: non-null indirects should only be used for TYP_REFs. - else if ((tree->gtFlags & GTF_IND_NONNULL) != 0) + else // TODO-VNTypes: this code needs to encode the types of the indirections. { - assert(tree->gtFlags & GTF_IND_NONFAULTING); - tree->gtVNPair = vnStore->VNPairForFunc(tree->TypeGet(), VNF_NonNullIndirect, addrNvnp); - tree->gtVNPair = vnStore->VNPWithExc(tree->gtVNPair, addrXvnp); - } - else - { - tree->gtVNPair = - ValueNumPair(vnStore->VNForMapSelect(VNK_Liberal, TYP_REF, ValueNumStore::VNForROH(), - addrNvnp.GetLiberal()), - vnStore->VNForMapSelect(VNK_Conservative, TYP_REF, ValueNumStore::VNForROH(), - addrNvnp.GetConservative())); + // Is this invariant indirect expected to always return a non-null value? + VNFunc loadFunc = + ((tree->gtFlags & GTF_IND_NONNULL) != 0) ? VNF_InvariantNonNullLoad : VNF_InvariantLoad; + + tree->gtVNPair = vnStore->VNPairForFunc(tree->TypeGet(), loadFunc, addrNvnp); tree->gtVNPair = vnStore->VNPWithExc(tree->gtVNPair, addrXvnp); } } @@ -8675,102 +8763,55 @@ void Compiler::fgValueNumberTree(GenTree* tree) // We will "evaluate" this as part of the assignment. 
else if ((tree->gtFlags & GTF_IND_ASG_LHS) == 0) { + var_types loadType = tree->TypeGet(); + ssize_t offset = 0; + unsigned loadSize = tree->AsIndir()->Size(); + FieldSeqNode* localFldSeq = nullptr; - VNFuncApp funcApp; + VNFuncApp funcApp{VNF_COUNT}; - // Is it a local or a heap address? - if (addr->IsLocalAddrExpr(this, &lclVarTree, &localFldSeq) && lvaInSsa(lclVarTree->GetLclNum()) && - lclVarTree->HasSsaName()) + // TODO-1stClassStructs: delete layout-less "IND(struct)" nodes and the "loadSize == 0" condition. + if (loadSize == 0) + { + tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, loadType)); + } + else if (addr->IsLocalAddrExpr(this, &lclVarTree, &localFldSeq, &offset) && + lvaInSsa(lclVarTree->GetLclNum()) && lclVarTree->HasSsaName()) { unsigned ssaNum = lclVarTree->GetSsaNum(); LclVarDsc* varDsc = lvaGetDesc(lclVarTree); + // TODO-PhysicalVN: with the physical VN scheme, we no longer need the field sequence checks. + // TODO-PhysicalVN: use "DefinesLocalAddr" here for consistency with the store code. 
if ((localFldSeq == FieldSeqStore::NotAField()) || (localFldSeq == nullptr)) { tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, tree->TypeGet())); } else { - var_types indType = tree->TypeGet(); ValueNumPair lclVNPair = varDsc->GetPerSsaData(ssaNum)->m_vnPair; - tree->gtVNPair = vnStore->VNPairApplySelectors(lclVNPair, localFldSeq, indType); + unsigned lclSize = lvaLclExactSize(lclVarTree->GetLclNum()); + offset = offset + lclVarTree->GetLclOffs(); + + tree->gtVNPair = vnStore->VNPairForLoad(lclVNPair, lclSize, tree->TypeGet(), offset, loadSize); } - tree->gtVNPair = vnStore->VNPWithExc(tree->gtVNPair, addrXvnp); } else if (vnStore->GetVNFunc(addrNvnp.GetLiberal(), &funcApp) && (funcApp.m_func == VNF_PtrToStatic)) { - var_types indType = tree->TypeGet(); - ValueNum fieldSeqVN = funcApp.m_args[1]; - - FieldSeqNode* fldSeqForStaticVar = vnStore->FieldSeqVNToFieldSeq(fieldSeqVN); - - if (fldSeqForStaticVar != FieldSeqStore::NotAField()) - { - assert(fldSeqForStaticVar != nullptr); - - ValueNum selectedStaticVar; - // We model statics as indices into the GcHeap (which is a subset of ByrefExposed). - size_t structSize = 0; - selectedStaticVar = vnStore->VNApplySelectors(VNK_Liberal, fgCurMemoryVN[GcHeap], - fldSeqForStaticVar, &structSize); - selectedStaticVar = vnStore->VNApplySelectorsTypeCheck(selectedStaticVar, indType, structSize); + fldSeq = vnStore->FieldSeqVNToFieldSeq(funcApp.m_args[1]); + offset = vnStore->ConstantValue(funcApp.m_args[2]); - tree->gtVNPair.SetLiberal(selectedStaticVar); - tree->gtVNPair.SetConservative(vnStore->VNForExpr(compCurBB, indType)); - } - else - { - JITDUMP(" *** Missing field sequence info for VNF_PtrToStatic value GT_IND\n"); - tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, indType)); // a new unique value number - } - tree->gtVNPair = vnStore->VNPWithExc(tree->gtVNPair, addrXvnp); + // Note VNF_PtrToStatic statics are currently always "simple". 
+ fgValueNumberFieldLoad(tree, /* baseAddr */ nullptr, fldSeq, offset); } else if (vnStore->GetVNFunc(addrNvnp.GetLiberal(), &funcApp) && (funcApp.m_func == VNF_PtrToArrElem)) { - fgValueNumberArrIndexVal(tree, &funcApp, addrXvnp); + fgValueNumberArrayElemLoad(tree, &funcApp); } - else if (addr->IsFieldAddr(this, &baseAddr, &fldSeq)) + else if (addr->IsFieldAddr(this, &baseAddr, &fldSeq, &offset)) { - assert((fldSeq != nullptr) && (fldSeq != FieldSeqStore::NotAField())); - - // The size of the ultimate value we will select, if it is of a struct type. - size_t structSize = 0; - ValueNum valueVN = ValueNumStore::NoVN; - - if (baseAddr != nullptr) - { - // Instance field / "complex" static: heap[field][baseAddr][struct fields...]. - - // Get the selector for the first field. - var_types firstFieldType; - ValueNum firstFieldSelectorVN = - vnStore->VNForFieldSelector(fldSeq->GetFieldHandle(), &firstFieldType, &structSize); - - ValueNum fldMapVN = - vnStore->VNForMapSelect(VNK_Liberal, TYP_MEM, fgCurMemoryVN[GcHeap], firstFieldSelectorVN); - - ValueNum firstFieldValueSelectorVN = vnStore->VNLiberalNormalValue(baseAddr->gtVNPair); - - // Construct the value number for fldMap[baseAddr]. - ValueNum firstFieldValueVN = - vnStore->VNForMapSelect(VNK_Liberal, firstFieldType, fldMapVN, firstFieldValueSelectorVN); - - // Finally, account for the rest of the fields in the sequence. - valueVN = - vnStore->VNApplySelectors(VNK_Liberal, firstFieldValueVN, fldSeq->GetNext(), &structSize); - } - else - { - // "Simple" static: heap[static][struct fields...]. - valueVN = vnStore->VNApplySelectors(VNK_Liberal, fgCurMemoryVN[GcHeap], fldSeq, &structSize); - } - - valueVN = vnStore->VNApplySelectorsTypeCheck(valueVN, tree->TypeGet(), structSize); - tree->gtVNPair.SetLiberal(valueVN); - - // The conservative value is a new, unique VN. 
- tree->gtVNPair.SetConservative(vnStore->VNForExpr(compCurBB, tree->TypeGet())); - tree->gtVNPair = vnStore->VNPWithExc(tree->gtVNPair, addrXvnp); + assert(fldSeq != FieldSeqStore::NotAField()); + fgValueNumberFieldLoad(tree, baseAddr, fldSeq, offset); } else // We don't know where the address points, so it is an ByrefExposed load. { @@ -8778,8 +8819,9 @@ void Compiler::fgValueNumberTree(GenTree* tree) ValueNum loadVN = fgValueNumberByrefExposedLoad(typ, addrVN); tree->gtVNPair.SetLiberal(loadVN); tree->gtVNPair.SetConservative(vnStore->VNForExpr(compCurBB, tree->TypeGet())); - tree->gtVNPair = vnStore->VNPWithExc(tree->gtVNPair, addrXvnp); } + + tree->gtVNPair = vnStore->VNPWithExc(tree->gtVNPair, addrXvnp); } // To be able to propagate exception sets, we give location nodes the "Void" VN. @@ -9227,9 +9269,11 @@ void Compiler::fgValueNumberArrIndexAddr(GenTreeArrAddr* arrAddr) FieldSeqNode* fldSeq = nullptr; GetZeroOffsetFieldMap()->Lookup(arrAddr, &fldSeq); - ValueNum fldSeqVN = vnStore->VNForFieldSeq(fldSeq); + ValueNum fldSeqVN = vnStore->VNForFieldSeq(fldSeq); + ValueNum offsetVN = vnStore->VNForIntPtrCon(0); + ValueNum elemOffsVN = vnStore->VNForFunc(TYP_I_IMPL, VNF_ArrElemOffset, fldSeqVN, offsetVN); - ValueNum arrAddrVN = vnStore->VNForFunc(TYP_BYREF, VNF_PtrToArrElem, elemTypeEqVN, arrVN, inxVN, fldSeqVN); + ValueNum arrAddrVN = vnStore->VNForFunc(TYP_BYREF, VNF_PtrToArrElem, elemTypeEqVN, arrVN, inxVN, elemOffsVN); ValueNumPair arrAddrVNP = ValueNumPair(arrAddrVN, arrAddrVN); arrAddr->gtVNPair = vnStore->VNPWithExc(arrAddrVNP, vnStore->VNPExceptionSet(arrAddr->Addr()->gtVNPair)); } @@ -9584,6 +9628,58 @@ ValueNumPair ValueNumStore::VNPairForCast(ValueNumPair srcVNPair, return {castLibVN, castConVN}; } +//------------------------------------------------------------------------ +// VNForBitCast: Get the VN representing bitwise reinterpretation of types. 
+// +// Arguments: +// srcVN - (VN of) the value being cast from +// castToType - The type being cast to +// +// Return Value: +// The value number representing "IND(ADDR(srcVN))". Notably, +// this includes the special sign/zero-extension semantic for small types. +// +// Notes: +// Bitcasts play a very significant role of representing identity (entire) +// selections and stores in the physical maps: when we have to "normalize" +// the types, we need something that represents a "nop" in the selection +// process, and "VNF_BitCast" is that function. See also the notes for +// "VNForLoadStoreBitCast". +// +ValueNum ValueNumStore::VNForBitCast(ValueNum srcVN, var_types castToType) +{ + // BitCast(BitCast(x)) => BitCast(x). + // This ensures we do not end up with pathologically long chains of + // bitcasts in physical maps. We could do a similar optimization in + // "VNForMapPhysical[Store|Select]"; we presume that's not worth it, + // and it is better TP-wise to skip bitcasts "lazily" when doing the + // selection, as the scenario where they are expected to be common, + // single-field structs, implies short selection chains. + VNFuncApp srcVNFunc; + if (GetVNFunc(srcVN, &srcVNFunc) && (srcVNFunc.m_func == VNF_BitCast)) + { + srcVN = srcVNFunc.m_args[0]; + } + + var_types srcType = TypeOfVN(srcVN); + + if (srcType == castToType) + { + return srcVN; + } + + assert((castToType != TYP_STRUCT) || (srcType != TYP_STRUCT)); + + // TODO-PhysicalVN: implement proper constant folding for BitCast. 
+ if ((srcVNFunc.m_func == VNF_ZeroObj) || + ((castToType != TYP_STRUCT) && (srcType != TYP_STRUCT) && (srcVN == VNZeroForType(srcType)))) + { + return VNZeroForType(castToType); + } + + return VNForFunc(castToType, VNF_BitCast, srcVN, VNForIntCon(castToType)); +} + void Compiler::fgValueNumberHelperCallFunc(GenTreeCall* call, VNFunc vnf, ValueNumPair vnpExc) { unsigned nArgs = ValueNumStore::VNFuncArity(vnf); @@ -10882,7 +10978,9 @@ void Compiler::fgDebugCheckValueNumberedTree(GenTree* tree) break; case VNF_PtrToArrElem: - fullFldSeq = vnStore->FieldSeqVNToFieldSeq(vnFunc.m_args[3]); + VNFuncApp offsFunc; + vnStore->GetVNFunc(vnFunc.m_args[3], &offsFunc); + fullFldSeq = vnStore->FieldSeqVNToFieldSeq(offsFunc.m_args[0]); break; default: diff --git a/src/coreclr/jit/valuenum.h b/src/coreclr/jit/valuenum.h index 71a1556d74c38c..b0cbc1e0653b0b 100644 --- a/src/coreclr/jit/valuenum.h +++ b/src/coreclr/jit/valuenum.h @@ -16,104 +16,126 @@ // // Finally, we will also use value numbers to express control-flow-dependent assertions. Some test may // imply that after the test, something new is known about a value: that an object reference is non-null -// after a dereference (since control flow continued because no exception was thrown); that an integer value -// is restricted to some subrange in after a comparison test; etc. +// after a dereference (since control flow continued because no exception was thrown); that an integer +// value is restricted to some subrange in after a comparison test; etc. // In addition to classical numbering, this implementation also performs disambiguation of heap writes, -// using memory SSA and the guarantee that two managed field accesses will not alias (if the fields do -// not overlap). This is the sole reason for the existence of field sequences - providing information -// to numbering on which addresses represent "proper", non-aliasing, bases for loads and stores. 
Note
-// that a field sequence is *only* about the address - it is the VN's responsibility to properly "give up"
-// on reads and stores that are too wide or of otherwise incompatible types - this logic is encapsulated
-// in the methods VNApplySelectorsTypeCheck and VNApplySelectorsAssignTypeCoerce. Note as well that this
-// design, based on symbols, not offsets, is why the compiler must take such great care in maintaining the
-// field sequences throughout its many transformations - accessing a field of type A "through" a field from
-// a distinct type B will result in effectively undefined behavior. We say this is by design for classes
-// (i. e. that people using Unsafe.As(object) need to *really* know what they're doing) and fix up places
-// where the compiler itself produces these type mismatches with structs (which can arise due to "reinterpret
-// casts" or morph's transforms which fold copies of structs with bitwise compatible layouts).
+// using memory SSA and the following aliasing model:
+//
+// 1. Arrays of different types do not alias - taking into account the array compatibility rules, i. e.
+//    "int[] <-> uint[]" and such being allowed.
+// 2. Different static fields do not alias (meaning mutable overlapping RVA statics are not supported).
+// 3. Different class fields do not alias. Struct fields are allowed to alias - this supports code that
+//    does reinterpretation of structs (e. g. "Unsafe.As(...)"), but makes it UB
+//    to alias reference types in the same manner (including via explicit layout).
+//
+// The no aliasing rule for fields should be interpreted to mean that "ld[s]fld[a] FieldOne" cannot refer
+// to the same location as "ld[s]fld[a] FieldTwo". The aliasing model above reflects the fact that type safety
+// rules in .NET largely only apply to reference types, while struct locations can be and often are treated
+// by user code (and, importantly, the compiler itself) as simple blobs of bytes.
// // Abstractly, numbering maintains states of memory in "maps", which are indexed into with various "selectors", -// loads reading from said maps and stores recording new states for them (note that as with everything VN, the -// "maps" are immutable, thus an update is performed via deriving a new map from an existing one). +// loads reading from said maps and stores recording new states for them (note that as with everything VN, +// the "maps" are immutable, thus an update is performed via deriving a new map from an existing one). +// +// Due to the fact we allow struct field access to alias, but still want to optimize it, our model has two +// types of maps and selectors: precise and physical. Precise maps allow arbitrary selectors, and if those +// are known to be distinct values (e. g. different constants), the values they select are also presumed to +// represent distinct locations. Physical maps, on the other hand, can only have one type of selector: "the +// physical selector", representing offset of the location and its size (in bytes), where both must be known +// at compile time. Naturally, different physical selectors can refer to overlapping locations. +// +// The following "VNFunc"s are relevant when it comes to map numbering: +// +// 1. "MapSelect" - represents a "value" taken from another map at a given index: "map[index] => value". It is +// the "VNForMapSelect[Work]" method that represents the core of the selection infrastructure: it performs +// various reductions based on the maps (listed below) being selected from, before "giving up" and creating +// a new "MapSelect" VN. "MapSelect"s are used for both precise and physical maps. +// 2. "Phi[Memory]Def" - the PHI function applied to multiple reaching definitions for a given block. PHIs can +// be reduced by the selection process: "Phi(d:1, d:2, ...)[index]" is evaluated as "Phi(d:1[index], ...)", +// so if all the inner selections ("d:n[index]") agree, that value is returned as the selected one. 
+// 3. "MapStore" - this is the precise "update" map, it represents a map after a "set" operation at some index. +// MapStore VNs naturally "chain" together, the next map representing an update of the previous, and will be +// traversed by the selection process as long as the store indices are constant, and different from the one +// being selected (meaning they represent distinct locations): "map[F0 := V0][F1 := V1][F0]" => "V0". +// 4. "MapPhysicalStore" - the physical equivalent to "MapStore", can only be indexed with physical selectors, +// with the selection rules taking into account aliasability of physical locations. +// 5. "BitCast" - the physical map representing "identity" selection ("map[0:sizeof(map) - 1]"). Exists because +// physical maps themselves do not have a strong type identity (the physical selector only cares about size). +// but the VN/IR at large do. Is a no-op in the selection process. One can notice that we could have chosen +// to represent this concept with an identity "MapPhysicalStore", however, a different "VNFunc" was ultimately +// chosen due to it being easier to reason about and a little cheaper, with the expectation that "BitCast"s +// would be reasonably common - the scenario they are meant to handle are stores/loads to/from structs with +// one field, where the location can be referenced from the IR as both TYP_STRUCT and the field's type. +// +// We give "placeholder" types (TYP_UNDEF and TYP_UNKNOWN as TYP_MEM and TYP_HEAP) to maps that do not represent +// values found in IR, which are currently all precise (though that is not a requirement of the model). +// +// We choose to maintain the following invariants with regards to types of physical locations: +// +// 1. Tree VNs are always "normalized on load" - their types are made to match (via bitcasts). We presume this +// makes the rest of the compiler code simpler, as it will not have to reason about "TYP_INT" trees having +// "TYP_FLOAT" value numbers. 
This normalization is currently not always done; that should be fixed. +// 2. Types of locals are "normalized on store" - this is different from the rest of physical locations, as not +// only VN looks at these value numbers (stored in SSA descriptors), and similar to the tree case, we presume +// it is simpler to reason about matching types. +// 3. Types of all other locations (array elements and fields) are not normalized - these only appear in the VN +// itself as physical maps / values. // -// Memory states are represented with value numbers corresponding to the following VNFunc's: -// 1. MemOpaque - for opaque memory states, usually caused by writing to byrefs, or other -// non-analyzable memory operations such as atomics or volatile writes (and reads!). -// 2. PhiMemoryDef - this is simply the PHI function applied to multiple reaching memory -// definitions for a given block. -// 3. MapStore - this is the "update" map, it represents a map after a "set" operation at a -// given index. Note that the value stored can itself be a map (memory state), for example, -// with the store to a field of a struct that itself is a field of a class, the value -// recorded for the struct is a MapStore($StructInClassFieldMap, $ValueField, $Value). -// Because of this, these VNs need to have proper types, those being the types that the -// maps they represent updated states of have (in the example above, the VN would have -// a "struct" type, for example). MapStore VNs naturally "chain" together, the next map -// representing an update of the previous, and VNForMapSelect, when it tries to find -// the value traverses through these chains, as long as the indices are constant (meaning -// that they represent distinct locations, e. g. different fields). -// 4. MapSelect - if MapStore is an "update/assign" operation, MapSelect is the "read" one. -// It represents a "value" (which, as with MapStore, can itself can be a memory region) -// taken from another map at a given index. 
As such, it must have a type that corresponds
-//    to the "index/selector", in practical terms - the field's type.
+// Note as well how we handle type identity for structs: we canonicalize on their size. This has the significant
+// consequence that any two equally-sized structs can be given the same value number, even if they have different
+// ABI characteristics or GC layout. The primary motivations for this are throughput and simplicity, however, we
+// would also like the compiler at large to treat structs with compatible layouts as equivalent, so that we can
+// propagate copies between them freely.
 //
-// Note that we give "placeholder" types (TYP_UNDEF and TYP_UNKNOWN as TYP_MEM and TYP_HEAP)
-// to maps that do not represent values found in IR. This is just to avoid confusion and
-// facilitate more precise validating checks.
 //
-// Let's review the following snippet to demonstrate how the MapSelect/MapStore machinery works
-// together to deliver the results that it does. Say we have this snippet of (C#) code:
+// Let's review the following snippet to demonstrate how the MapSelect/MapStore machinery works. Say we have this
+// snippet of (C#) code:
 //
 // int Procedure(OneClass obj, AnotherClass subj, int objVal, int subjVal)
 // {
-//     obj.StructField.AnotherStructField.ScalarField = objVal;
+//     obj.StructField.ScalarField = objVal;
 //     subj.OtherScalarField = subjVal;
 //
-//     return obj.StructField.AnotherStructField.ScalarField + subj.OtherScalarField;
+//     return obj.StructField.ScalarField + subj.OtherScalarField;
 // }
 //
-// On entry, we assign some VN to the GcHeap (VN mostly only cares about GcHeap, so from now on
-// the term "heap" will be used to mean GcHeap), $Heap.
+// On entry, we assign some VN to the GcHeap (VN mostly only cares about GcHeap, so from now on the term "heap"
+// will be used to mean GcHeap), $Heap.
 //
-// A store to the ScalarField is seen. 
Now, the value numbering of fields is done in the following -// pattern for maps that it builds: [$Heap][$FirstField][$Object][$SecondField]...[$FieldToStoreInto]. -// It may seem odd that the indexing is done first for the field, and only then for the object, but -// the reason for that is the fact that it enables MapStores to $Heap to refer to distinct selectors, -// thus enabling the traversal through the map updates when looking for the values that were stored. -// Were $Object VNs used for this, the traversal could not be performed, as two numerically different VNs -// can, obviously, refer to the same object. +// A store to the ScalarField is seen. Now, the value numbering of fields is done in the following pattern for +// maps that it builds: [$Heap][$FirstField][$Object][offset:offset + size of the store]. It may seem odd that +// the indexing is done first for the field, and only then for the object, but the reason for that is the fact +// that it enables MapStores to $Heap to refer to distinct selectors, thus enabling the traversal through the +// map updates when looking for the values that were stored. Were $Object VNs used for this, the traversal could +// not be performed, as two numerically different VNs can, obviously, refer to the same object. 
// // With that in mind, the following maps are first built for the store ("field VNs" - VNs for handles): // -// $StructFieldMap = MapSelect($Heap, $StructField) -// $StructFieldForObjMap = MapSelect($StructFieldMap, $Obj) -// $AnotherStructFieldMap = MapSelect($StructFieldForObjMap, $AnotherStructField) -// $ScalarFieldMap = MapSelect($AnotherStructFieldMap, $ScalarField) +// $StructFieldMap = MapSelect($Heap, $StructField) +// $StructFieldForObjMap = MapSelect($StructFieldMap, $Obj) // -// The building of maps for the individual fields in the sequence [AnotherStructField, ScalarField] is -// usually performed by VNApplySelectors, which is just a convenience method that loops over all the -// fields, calling VNForMapSelect on them. Now that we know where to store, the store maps are built: +// Now that we know where to store, the store maps are built: // -// $NewAnotherStructFieldMap = MapStore($AnotherStructFieldMap, $ScalarField, $ObjVal) -// $NewStructFieldForObjMap = MapStore($StructFieldForObjMap, $AnotherStructField, $NewAnotherStructFieldMap) -// $NewStructFieldMap = MapStore($StructFieldMap, $Obj, $NewStructFieldForObjMap) -// $NewHeap = MapStore($Heap, $StructField, $NewStructFieldMap) +// $ScalarFieldSelector = PhysicalSelector(offsetof(ScalarField), sizeof(ScalarField)) +// $NewStructFieldForObjMap = MapPhysicalStore($StructFieldForObjMap, $ScalarFieldSelector, $ObjVal) +// $NewStructFieldMap = MapStore($StructFieldMap, $Obj, $NewStructFieldForObjMap) +// $NewHeap = MapStore($Heap, $StructField, $NewStructFieldMap) // -// Notice that the maps are built in the opposite order, as we must first know the value of the "narrower" -// map to store into the "wider" map (this is implemented via recursion in VNApplySelectorsAssign). +// Notice that the maps are built in the opposite order, as we must first know the value of the "narrower" map to +// store into the "wider" map. 
// -// Similarly, the numbering is performed for "subj.OtherScalarField = subjVal", and the heap state updated -// (say to $NewHeapWithSubj). Now when we call VNForMapSelect to find out the stored values when numbering -// the reads, the following traversal is performed (the '[]' operator representing VNForMapSelect): +// Similarly, the numbering is performed for "subj.OtherScalarField = subjVal", and the heap state updated (say to +// $NewHeapWithSubj). Now when we call "VNForMapSelect" to find out the stored values when numbering the reads, the +// following traversal is performed: // // $obj.StructField.AnotherStructField.ScalarField -// = $NewHeapWithSubj[$StructField][$Obj][$AnotherStructField][ScalarField]: +// = $NewHeapWithSubj[$StructField][$Obj][$ScalarFieldSelector]: // "$NewHeapWithSubj.Index == $StructField" => false (not the needed map). // "IsConst($NewHeapWithSubj.Index) && IsConst($StructField)" => true (can continue, non-aliasing indices). // "$NewHeap.Index == $StructField" => true, Value is $NewStructFieldMap. // "$NewStructFieldMap.Index == $Obj" => true, Value is $NewStructFieldForObjMap. -// "$NewStructFieldForObjMap.Index == $AnotherStructField" => true, Value is $NewAnotherStructFieldMap. -// "$NewAnotherStructFieldMap.Index == $ScalarField" => true, Value is $ObjVal (found the value!). +// "$NewStructFieldForObjMap.Index == $ScalarFieldSelector" => true, Value is $ObjVal (found it!). // // And similarly for the $SubjVal - we end up with a nice $Add($ObjVal, $SubjVal) feeding the return. // @@ -121,8 +143,7 @@ // modeled as straight indicies into the heap (MapSelect($Heap, $Field) returns the value of the field for them), // arrays - like fields, but with the primiary selector being not the first field, but the "equivalence class" of // an array, i. e. the type of its elements, taking into account things like "int[]" being legally aliasable as -// "uint[]". 
MapSelect & MapStore are also used to number local fields, with the primary selectors being (VNs of) -// their local numbers. +// "uint[]". Physical maps are used to number local fields. /*****************************************************************************/ #ifndef _VALUENUM_H_ @@ -390,6 +411,7 @@ class ValueNumStore // This block of methods gets value numbers for constants of primitive types. ValueNum VNForIntCon(INT32 cnsVal); + ValueNum VNForIntPtrCon(ssize_t cnsVal); ValueNum VNForLongCon(INT64 cnsVal); ValueNum VNForFloatCon(float cnsVal); ValueNum VNForDoubleCon(double cnsVal); @@ -421,25 +443,16 @@ class ValueNumStore // And the single constant for an object reference type. static ValueNum VNForNull() { - // We reserve Chunk 0 for "special" VNs. SRC_Null (== 0) is the VN of "null". return ValueNum(SRC_Null); } - // The ROH map is the map for the "read-only heap". We assume that this is never mutated, and always - // has the same value number. - static ValueNum VNForROH() - { - // We reserve Chunk 0 for "special" VNs. Let SRC_ReadOnlyHeap (== 3) be the read-only heap. - return ValueNum(SRC_ReadOnlyHeap); - } - // A special value number for "void" -- sometimes a type-void thing is an argument, // and we want the args to be non-NoVN. static ValueNum VNForVoid() { - // We reserve Chunk 0 for "special" VNs. Let SRC_Void (== 4) be the value for "void". return ValueNum(SRC_Void); } + static ValueNumPair VNPForVoid() { return ValueNumPair(VNForVoid(), VNForVoid()); @@ -448,10 +461,9 @@ class ValueNumStore // A special value number for the empty set of exceptions. static ValueNum VNForEmptyExcSet() { - // We reserve Chunk 0 for "special" VNs. Let SRC_EmptyExcSet (== 5) be the value for the empty set of - // exceptions. 
return ValueNum(SRC_EmptyExcSet); } + static ValueNumPair VNPForEmptyExcSet() { return ValueNumPair(VNForEmptyExcSet(), VNForEmptyExcSet()); @@ -597,6 +609,8 @@ class ValueNumStore ValueNum VNForMapSelect(ValueNumKind vnk, var_types type, ValueNum map, ValueNum index); + ValueNum VNForMapPhysicalSelect(ValueNumKind vnk, var_types type, ValueNum map, unsigned offset, unsigned size); + // A method that does the work for VNForMapSelect and may call itself recursively. ValueNum VNForMapSelectWork( ValueNumKind vnk, var_types type, ValueNum map, ValueNum index, int* pBudget, bool* pUsedRecursiveVN); @@ -604,7 +618,23 @@ class ValueNumStore // A specialized version of VNForFunc that is used for VNF_MapStore and provides some logging when verbose is set ValueNum VNForMapStore(ValueNum map, ValueNum index, ValueNum value); - ValueNum VNForFieldSelector(CORINFO_FIELD_HANDLE fieldHnd, var_types* pFieldType, size_t* pStructSize = nullptr); + ValueNum VNForMapPhysicalStore(ValueNum map, unsigned offset, unsigned size, ValueNum value); + + bool MapIsPrecise(ValueNum map) const + { + return (TypeOfVN(map) == TYP_HEAP) || (TypeOfVN(map) == TYP_MEM); + } + + bool MapIsPhysical(ValueNum map) const + { + return !MapIsPrecise(map); + } + + ValueNum EncodePhysicalSelector(unsigned offset, unsigned size); + + unsigned DecodePhysicalSelector(ValueNum selector, unsigned* pSize); + + ValueNum VNForFieldSelector(CORINFO_FIELD_HANDLE fieldHnd, var_types* pFieldType, unsigned* pSize); // These functions parallel the ones above, except that they take liberal/conservative VN pairs // as arguments, and return such a pair (the pair of the function applied to the liberal args, and @@ -646,31 +676,31 @@ class ValueNumStore // This controls extra tracing of the "evaluation" of "VNF_MapSelect" functions. 
#define FEATURE_VN_TRACE_APPLY_SELECTORS 1 - ValueNum VNApplySelectors(ValueNumKind vnk, - ValueNum map, - FieldSeqNode* fieldSeq, - size_t* wbFinalStructSize = nullptr); + ValueNum VNForLoad(ValueNumKind vnk, + ValueNum locationValue, + unsigned locationSize, + var_types loadType, + ssize_t offset, + unsigned loadSize); - ValueNum VNApplySelectorsTypeCheck(ValueNum value, var_types indType, size_t valueStructSize); + ValueNumPair VNPairForLoad( + ValueNumPair locationValue, unsigned locationSize, var_types loadType, ssize_t offset, unsigned loadSize); - ValueNum VNApplySelectorsAssign( - ValueNumKind vnk, ValueNum map, FieldSeqNode* fieldSeq, ValueNum value, var_types dstIndType); + ValueNum VNForStore( + ValueNum locationValue, unsigned locationSize, ssize_t offset, unsigned storeSize, ValueNum value); - ValueNum VNApplySelectorsAssignTypeCoerce(ValueNum value, var_types dstIndType); + ValueNumPair VNPairForStore( + ValueNumPair locationValue, unsigned locationSize, ssize_t offset, unsigned storeSize, ValueNumPair value); - ValueNumPair VNPairApplySelectors(ValueNumPair map, FieldSeqNode* fieldSeq, var_types indType); - - ValueNumPair VNPairApplySelectorsAssign(ValueNumPair map, - FieldSeqNode* fieldSeq, - ValueNumPair value, - var_types dstIndType) + bool LoadStoreIsEntire(unsigned locationSize, ssize_t offset, unsigned indSize) const { - return ValueNumPair(VNApplySelectorsAssign(VNK_Liberal, map.GetLiberal(), fieldSeq, value.GetLiberal(), - dstIndType), - VNApplySelectorsAssign(VNK_Conservative, map.GetConservative(), fieldSeq, - value.GetConservative(), dstIndType)); + return (offset == 0) && (locationSize == indSize); } + ValueNum VNForLoadStoreBitCast(ValueNum value, var_types indType, unsigned indSize); + + ValueNumPair VNPairForLoadStoreBitCast(ValueNumPair value, var_types indType, unsigned indSize); + // Compute the ValueNumber for a cast ValueNum VNForCast(ValueNum srcVN, var_types castToType, @@ -685,6 +715,8 @@ class ValueNumStore bool srcIsUnsigned 
= false, bool hasOverflowCheck = false); + ValueNum VNForBitCast(ValueNum srcVN, var_types castToType); + bool IsVNNotAField(ValueNum vn); ValueNum VNForFieldSeq(FieldSeqNode* fieldSeq); @@ -698,7 +730,7 @@ class ValueNumStore ValueNum ExtendPtrVN(GenTree* opA, GenTree* opB); // If "opA" has a PtrToLoc, PtrToArrElem, or PtrToStatic application as its value numbers, returns the VN for the // pointer form extended with "fieldSeq"; or else NoVN. - ValueNum ExtendPtrVN(GenTree* opA, FieldSeqNode* fieldSeq); + ValueNum ExtendPtrVN(GenTree* opA, FieldSeqNode* fieldSeq, ssize_t offset); // Queries on value numbers. // All queries taking value numbers require that those value numbers are valid, that is, that @@ -706,7 +738,7 @@ class ValueNumStore // not true. // Returns TYP_UNKNOWN if the given value number has not been given a type. - var_types TypeOfVN(ValueNum vn); + var_types TypeOfVN(ValueNum vn) const; // Returns BasicBlock::MAX_LOOP_NUM if the given value number's loop nest is unknown or ill-defined. BasicBlock::loopNumber LoopOfVN(ValueNum vn); @@ -1025,6 +1057,10 @@ class ValueNumStore // Prints a representation of a MapStore operation on standard out. void vnDumpMapStore(Compiler* comp, VNFuncApp* mapStore); + void vnDumpPhysicalSelector(ValueNum selector); + + void vnDumpMapPhysicalStore(Compiler* comp, VNFuncApp* mapPhysicalStore); + // Requires "memOpaque" to be a mem opaque VNFuncApp // Prints a representation of a MemOpaque state on standard out. void vnDumpMemOpaque(Compiler* comp, VNFuncApp* memOpaque); @@ -1047,6 +1083,8 @@ class ValueNumStore // Prints the cast's representation mirroring GT_CAST's dump format. void vnDumpCast(Compiler* comp, ValueNum castVN); + void vnDumpBitCast(Compiler* comp, VNFuncApp* bitCast); + // Requires "zeroObj" to be a VNF_ZeroObj. Prints its representation. void vnDumpZeroObj(Compiler* comp, VNFuncApp* zeroObj); @@ -1405,10 +1443,10 @@ class ValueNumStore return m_VNFunc4Map; } + // We reserve Chunk 0 for "special" VNs. 
enum SpecialRefConsts { SRC_Null, - SRC_ReadOnlyHeap, SRC_Void, SRC_EmptyExcSet, diff --git a/src/coreclr/jit/valuenumfuncs.h b/src/coreclr/jit/valuenumfuncs.h index 32f17c685e137f..b5e23ca8513723 100644 --- a/src/coreclr/jit/valuenumfuncs.h +++ b/src/coreclr/jit/valuenumfuncs.h @@ -6,23 +6,25 @@ // ) // clang-format off -ValueNumFuncDef(MemOpaque, 1, false, false, false) // Args: 0: loop num -ValueNumFuncDef(MapStore, 4, false, false, false) // Args: 0: map, 1: index (e. g. field handle), 2: value being stored, 3: loop num. -ValueNumFuncDef(MapSelect, 2, false, false, false) // Args: 0: map, 1: key. +ValueNumFuncDef(MemOpaque, 1, false, false, false) // Args: 0: loop num +ValueNumFuncDef(MapSelect, 2, false, false, false) // Args: 0: map, 1: key. +ValueNumFuncDef(MapStore, 4, false, false, false) // Args: 0: map, 1: index (e. g. field handle), 2: value being stored, 3: loop num. +ValueNumFuncDef(MapPhysicalStore, 3, false, false, false) // Args: 0: map, 1: "physical selector": offset and size, 2: value being stored +ValueNumFuncDef(BitCast, 2, false, false, false) // Args: 0: VN of the arg, 1: VN of the target type +ValueNumFuncDef(ZeroObj, 1, false, false, false) // Args: 0: VN of the class handle. +ValueNumFuncDef(PhiDef, 3, false, false, false) // Args: 0: local var # (or -1 for memory), 1: SSA #, 2: VN of definition. +ValueNumFuncDef(PhiMemoryDef, 2, false, false, false) // Args: 0: VN for basic block pointer, 1: VN of definition +ValueNumFuncDef(Phi, 2, false, false, false) // A phi function. Only occurs as arg of PhiDef or PhiMemoryDef. Arguments are SSA numbers of var being defined. ValueNumFuncDef(PtrToLoc, 2, false, true, false) // Pointer (byref) to a local variable. Args: VN's of: 0: var num, 1: FieldSeq. -ValueNumFuncDef(PtrToArrElem, 4, false, false, false) // Pointer (byref) to an array element. Args: 0: array elem type eq class var_types value, VN's of: 1: array, 2: index, 3: FieldSeq. 
-ValueNumFuncDef(PtrToStatic, 2, false, true, false) // Pointer (byref) to a static variable (or possibly a field thereof, if the static variable is a struct). - // Args: 0: (VN of) the box's address if the static is "boxed", 1: the field sequence, of which the first element is the static itself. - -ValueNumFuncDef(Phi, 2, false, false, false) // A phi function. Only occurs as arg of PhiDef or PhiMemoryDef. Arguments are SSA numbers of var being defined. -ValueNumFuncDef(PhiDef, 3, false, false, false) // Args: 0: local var # (or -1 for memory), 1: SSA #, 2: VN of definition. -// Wouldn't need this if I'd made memory a regular local variable... -ValueNumFuncDef(PhiMemoryDef, 2, false, false, false) // Args: 0: VN for basic block pointer, 1: VN of definition -ValueNumFuncDef(ZeroObj, 1, false, false, false) // Zero-initialized struct. Args: 0: VN of the class handle. -ValueNumFuncDef(InitVal, 1, false, false, false) // An input arg, or init val of a local Args: 0: a constant VN. - +ValueNumFuncDef(PtrToArrElem, 4, false, false, false) // Pointer (byref) to an array element. Args: 0: array elem type eq class var_types value, VN's of: 1: array, 2: index, 3: "ArrElemOffset" VN. +ValueNumFuncDef(ArrElemOffset, 2, false, false, false) // Args: 0: (VN of) the field sequence, 1: (VN of) the offset +ValueNumFuncDef(PtrToStatic, 3, false, true, false) // Pointer (byref) to a static variable (or possibly a field thereof, if the static variable is a struct). + // Args: 0: (VN of) the box's address if the static is "boxed", + // 1: (VN of) the field sequence, of which the first element is the static itself. + // 2: (VN of) offset for the constituent struct fields +ValueNumFuncDef(InitVal, 1, false, false, false) // An input arg, or init val of a local Args: 0: a constant VN. ValueNumFuncDef(Cast, 2, false, false, false) // VNF_Cast: Cast Operation changes the representations size and unsigned-ness. // Args: 0: Source for the cast operation. 
@@ -139,8 +141,9 @@ ValueNumFuncDef(JitReadyToRunNewArr, 3, false, true, false) ValueNumFuncDef(Box, 3, false, false, false) ValueNumFuncDef(BoxNullable, 3, false, false, false) -ValueNumFuncDef(LazyStrCns, 2, false, true, false) // lazy-initialized string literal (helper) -ValueNumFuncDef(NonNullIndirect, 1, false, true, false) // this indirect is expected to always return a non-null value +ValueNumFuncDef(LazyStrCns, 2, false, true, false) // Lazy-initialized string literal (helper) +ValueNumFuncDef(InvariantLoad, 1, false, false, false) // Args: 0: (VN of) the address. +ValueNumFuncDef(InvariantNonNullLoad, 1, false, true, false) // Args: 0: (VN of) the address. ValueNumFuncDef(Unbox, 2, false, true, false) ValueNumFuncDef(LT_UN, 2, false, false, false) // unsigned or unordered comparisons