Skip to content

Commit

Permalink
fixed dynamic shape test cases
Browse files Browse the repository at this point in the history
  • Loading branch information
antonvor committed Dec 15, 2023
1 parent 0fa3ff6 commit aaa63c7
Show file tree
Hide file tree
Showing 2 changed files with 40 additions and 1 deletion.
24 changes: 23 additions & 1 deletion src/plugins/intel_cpu/src/nodes/fullyconnected.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -464,11 +464,33 @@ void FullyConnected::prepareWeightsUsingDummyShape() {
if (selected_pd == nullptr)
OPENVINO_THROW("Preferable primitive descriptor is not set for node ", getName(), ".");

auto inDesc = MemoryDescUtils::convertToDnnlMemoryDesc(MemoryDescUtils::makeDummyDesc(*getBaseMemDescAtInputPort(DATA_ID)));
DnnlMemoryDescPtr inDesc = nullptr;
auto weightDesc = MemoryDescUtils::convertToDnnlMemoryDesc(weightDescIP);
auto biasDesc = withBiases ? MemoryDescUtils::convertToDnnlMemoryDesc(getBaseMemDescAtInputPort(BIAS_ID)) : nullptr;
auto outDesc = MemoryDescUtils::convertToDnnlMemoryDesc(MemoryDescUtils::makeDummyDesc(*getBaseMemDescAtOutputPort(0)));

Shape newInShape = getBaseMemDescAtInputPort(DATA_ID)->getShape();
if (isDynamicNode()) {
auto originalInDesc = getBaseMemDescAtInputPort(DATA_ID);
auto originalInDims = originalInDesc->getShape().getDims();
size_t dimIdx = originalInDims.size() == 3 ? 1 : 0;
// Propagate N dim from the output shape to the input shape
if (newInShape.getDims()[dimIdx] == Shape::UNDEFINED_DIM &&
getBaseMemDescAtOutputPort(0)->getShape().getDims()[dimIdx] != Shape::UNDEFINED_DIM) {
newInShape = cloneShapeWithNewDim(newInShape, getBaseMemDescAtOutputPort(0)->getShape().getDims()[dimIdx], dimIdx);
}
// Propagate K dim from the weights shape to the input shape
if (newInShape.getDims()[dimIdx+1] == Shape::UNDEFINED_DIM &&
weightDesc->getShape().getDims()[1] != Shape::UNDEFINED_DIM) {
newInShape = cloneShapeWithNewDim(newInShape, weightDesc->getShape().getDims()[1], dimIdx+1);
}

auto newInDesc = DnnlBlockedMemoryDesc(originalInDesc->getPrecision(), MemoryDescUtils::makeDummyShape(newInShape));
inDesc = MemoryDescUtils::convertToDnnlMemoryDesc(MemoryDescUtils::makeDummyDesc(newInDesc));
} else {
inDesc = MemoryDescUtils::convertToDnnlMemoryDesc(MemoryDescUtils::makeDummyDesc(*getBaseMemDescAtInputPort(DATA_ID)));
}

const FCKey key = {inDesc,
weightDesc,
biasDesc,
Expand Down
17 changes: 17 additions & 0 deletions src/plugins/intel_cpu/src/utils/cpu_utils.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,23 @@ inline std::vector<size_t> getNormalizedDimsBySize(const VectorDims &dims, size_
return normalizedDims;
}

/**
 * @brief Clones the passed shape, replacing one of its dimensions.
 * @param originalShape
 *        shape to clone (read-only; taken by const reference to avoid copying)
 * @param newDimValue
 *        new value for the dimension being replaced
 * @param dim
 *        index of the dimension to replace; must be < rank of the shape
 * @return cloned shape whose dimension at index @p dim equals @p newDimValue
 */
inline Shape cloneShapeWithNewDim(const Shape& originalShape, Dim newDimValue, size_t dim) {
    VectorDims newDims = originalShape.getDims();
    assert(dim < newDims.size());
    newDims[dim] = newDimValue;
    // NOTE(review): min dims are propagated unchanged, so for a dynamic dimension the
    // lower bound at 'dim' may remain below newDimValue — confirm this is intentional.
    return Shape(originalShape.getMinDims(), newDims);
}

/**
* @brief Checked that secondInputDims unidirectional broadcastable per tensor or per channel to firstInputDims
* @param firstInputDims
Expand Down

0 comments on commit aaa63c7

Please sign in to comment.