IVGCVSW-6124 ConstTensorsAsInput: Convolution2d
 * Add support for Android-nn-driver

!armnn:7382

Signed-off-by: Keith Davis <[email protected]>
Signed-off-by: Kevin May <[email protected]>
Change-Id: I7ace53cf1c8954c1f2d5588387616d7179ef3bf7
keidav01 authored and ArmRyan committed May 19, 2022
1 parent bb7b281 commit 1ebd34e
Showing 2 changed files with 82 additions and 36 deletions.
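
As context for the diff below: with ConstTensorsAsInput, weights and bias are no longer baked into AddConvolution2dLayer() but are wired into the convolution's input slots 1 and 2 like any other tensor. Here is a minimal sketch of that pattern from the graph-building side, assuming the armnn::INetwork API of this release; the AddConv2d helper and its descriptor values are hypothetical, not part of this patch.

// Minimal sketch (helper name and values hypothetical) of ConstTensorsAsInput:
// constants enter the graph as layers connected to slots 1 (weights) and 2 (bias).
#include <armnn/INetwork.hpp>

armnn::IConnectableLayer* AddConv2d(armnn::INetwork& network,
                                    const armnn::ConstTensor& weights,
                                    const armnn::ConstTensor& bias)
{
    armnn::Convolution2dDescriptor desc;
    desc.m_StrideX     = 1;
    desc.m_StrideY     = 1;
    desc.m_BiasEnabled = true;
    desc.m_DataLayout  = armnn::DataLayout::NHWC;

    // New style: the convolution takes only its descriptor...
    armnn::IConnectableLayer* conv = network.AddConvolution2dLayer(desc);

    // ...and the constants are ordinary layers wired into its input slots,
    // mirroring what the driver code below does with LayerInputHandles.
    armnn::IConnectableLayer* weightsLayer = network.AddConstantLayer(weights);
    armnn::IConnectableLayer* biasLayer    = network.AddConstantLayer(bias);

    weightsLayer->GetOutputSlot(0).SetTensorInfo(weights.GetInfo());
    biasLayer->GetOutputSlot(0).SetTensorInfo(bias.GetInfo());

    weightsLayer->GetOutputSlot(0).Connect(conv->GetInputSlot(1));
    biasLayer->GetOutputSlot(0).Connect(conv->GetInputSlot(2));

    return conv; // input slot 0 stays free for the data tensor
}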
46 changes: 29 additions & 17 deletions ConversionUtils.hpp
@@ -1189,7 +1189,8 @@ template<typename HalPolicy,
 LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
                                            uint32_t inputIndex,
                                            const HalModel& model,
-                                           ConversionData& data)
+                                           ConversionData& data,
+                                           const armnn::PermutationVector& dimensionMappings = g_DontPermute)
 {
     using HalOperand = typename HalPolicy::Operand;
     using HalOperandType = typename HalPolicy::OperandType;
@@ -1252,7 +1253,9 @@ LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
         case HalOperandLifeTime::CONSTANT_REFERENCE:
         {
             // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
-            ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
+            ConstTensorPin tensorPin =
+                ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data, dimensionMappings);
+
             if (tensorPin.IsValid())
             {
                 bool isSupported = false;
@@ -1302,7 +1305,8 @@ template<typename HalPolicy>
 LayerInputHandle ConvertToLayerInputHandle(const ::android::hardware::neuralnetworks::V1_3::Operation& operation,
                                            uint32_t inputIndex,
                                            const::android::hardware::neuralnetworks::V1_3::Model& model,
-                                           ConversionData& data)
+                                           ConversionData& data,
+                                           const armnn::PermutationVector& dimensionMappings = g_DontPermute)
 {
     using HalOperand = typename HalPolicy::Operand;
     using HalOperandType = typename HalPolicy::OperandType;
@@ -1379,7 +1383,9 @@ LayerInputHandle ConvertToLayerInputHandle(const ::android::hardware::neuralnetworks::V1_3::Operation& operation,
         case HalOperandLifeTime::CONSTANT_REFERENCE:
         {
             // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
-            ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
+            ConstTensorPin tensorPin =
+                ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data, dimensionMappings);
+
             if (tensorPin.IsValid())
             {
                 bool isSupported = false;
@@ -2395,18 +2401,21 @@ bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
     const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
 
-    // ArmNN does not currently support non-fixed weights or bias
-    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
-    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);
+    LayerInputHandle weightsInput = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
+    if (!weightsInput.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
 
-    if (!weightsPin.IsValid() || !biasPin.IsValid())
+    LayerInputHandle biasInput = ConvertToLayerInputHandle<HalPolicy>(operation, 2, model, data); // 1D
+    if (!biasInput.IsValid())
     {
         return Fail("%s: Operation has invalid inputs", __func__);
     }
 
-    armnn::ConstTensor weights = weightsPin.GetConstTensor();
-    armnn::ConstTensor bias = biasPin.GetConstTensor();
-    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);
+    biasInput.SanitizeQuantizationScale(weightsInput, input);
+    armnn::TensorInfo weightsInfo = weightsInput.GetTensorInfo();
+    armnn::TensorInfo biasInfo = biasInput.GetTensorInfo();
 
     armnn::Convolution2dDescriptor desc;
     desc.m_DataLayout = armnn::DataLayout::NHWC;
@@ -2436,8 +2445,8 @@ bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
         return Fail("%s: Operation has invalid inputs", __func__);
     }
 
-    const uint32_t kernelX = weights.GetShape()[2];
-    const uint32_t kernelY = weights.GetShape()[1];
+    const uint32_t kernelX = weightsInfo.GetShape()[2];
+    const uint32_t kernelY = weightsInfo.GetShape()[1];
     const uint32_t inputX = inputInfo.GetShape()[2];
     const uint32_t inputY = inputInfo.GetShape()[1];
 
@@ -2450,7 +2459,7 @@ bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
     }
 
     desc.m_BiasEnabled = true;
-    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());
+    armnn::Optional<armnn::TensorInfo> biases(biasInfo);
 
     bool isSupported = false;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
@@ -2462,7 +2471,7 @@ bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
                                    inputInfo,
                                    outputInfo,
                                    desc,
-                                   weights.GetInfo(),
+                                   weightsInfo,
                                    biases);
     };
 
@@ -2480,8 +2489,7 @@ bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
         return false;
     }
 
-    armnn::IConnectableLayer* startLayer =
-        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
+    armnn::IConnectableLayer* startLayer = data.m_Network->AddConvolution2dLayer(desc);
 
     if (!startLayer)
     {
@@ -2490,6 +2498,10 @@ bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
 
     input.Connect(startLayer->GetInputSlot(0));
 
+    // Connect weights and bias inputs
+    weightsInput.Connect(startLayer->GetInputSlot(1));
+    biasInput.Connect(startLayer->GetInputSlot(2));
+
     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
                                                    data, nullptr, validateFunc, activation);
 }
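A note on the SanitizeQuantizationScale call introduced in ConvertConv2d above: it replaces the free function SanitizeBiasQuantizationScale, operating on the handle's TensorInfo rather than on a ConstTensor. Both enforce the NNAPI rule that a quantized bias scale equals input scale times weights scale. A rough sketch of that invariant follows (hypothetical helper, not the driver's implementation):

// Sketch only: for quantized Conv2d, NNAPI requires
// bias_scale == input_scale * weights_scale, so a slightly-off scale
// supplied by the model is rewritten rather than rejected.
void SanitizeBiasScale(armnn::TensorInfo& biasInfo,
                       const armnn::TensorInfo& weightsInfo,
                       const armnn::TensorInfo& inputInfo)
{
    const float expected =
        inputInfo.GetQuantizationScale() * weightsInfo.GetQuantizationScale();
    if (biasInfo.GetQuantizationScale() != expected)
    {
        biasInfo.SetQuantizationScale(expected);
    }
}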
72 changes: 53 additions & 19 deletions ConversionUtils_1_2.hpp
@@ -22,6 +22,31 @@ namespace armnn_driver
 using namespace armnn;
 using namespace android::nn;
 
+template<typename HalPolicy,
+         typename HalOperation = typename HalPolicy::Operation,
+         typename HalModel     = typename HalPolicy::Model>
+bool IsWeightsValid(const HalOperation& operation,
+                    uint32_t inputIndex,
+                    const HalModel& model)
+{
+    using HalOperand         = typename HalPolicy::Operand;
+    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;
+    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
+    if (!operand)
+    {
+        Fail("%s: failed to get input operand %i", __func__, inputIndex);
+        return false;
+    }
+
+    if (operand->lifetime != HalOperandLifeTime::CONSTANT_COPY
+        && operand->lifetime != HalOperandLifeTime::CONSTANT_REFERENCE
+        && operand->lifetime != HalOperandLifeTime::NO_VALUE)
+    {
+        return false;
+    }
+    return true;
+}
+
 template<typename HalPolicy,
          typename HalOperation = typename HalPolicy::Operation,
          typename HalModel = typename HalPolicy::Model>
@@ -381,26 +406,31 @@ bool ConvertConv2d_1_2(const HalOperation& operation, const HalModel& model, ConversionData& data)
     // The NNAPI filter is always OHWI [depth_out, filter_height, filter_width, depth_in] but ArmNN expects the
     // filter's height and width indices to match the input's height and width indices so we permute it to OIHW if
     // the DataLayout is NCHW
-    const ConstTensorPin weightsPin = (desc.m_DataLayout == DataLayout::NCHW) ?
-                                      ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1,
-                                                                                       model, data, OHWIToOIHW) :
-                                      ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
-    const ConstTensorPin biasPin =
-        ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);
 
-    if (!weightsPin.IsValid())
+
+    if (!IsWeightsValid<HalPolicy>(operation, 1, model) && desc.m_DataLayout == DataLayout::NCHW)
     {
-        return Fail("%s: Operation has invalid weights", __func__);
+        return Fail("%s: Operation has unsupported weights HalOperandLifeTime", __func__);
     }
 
-    if (!biasPin.IsValid())
+    LayerInputHandle weightsInput = (desc.m_DataLayout == DataLayout::NCHW) ?
+                                    ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data, OHWIToOIHW) :
+                                    ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
+
+    if (!weightsInput.IsValid())
     {
-        return Fail("%s: Operation has invalid biases", __func__);
+        return Fail("%s: Operation has invalid inputs", __func__);
     }
 
-    ConstTensor weights = weightsPin.GetConstTensor();
-    ConstTensor bias = biasPin.GetConstTensor();
-    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);
+    LayerInputHandle biasInput = ConvertToLayerInputHandle<HalPolicy>(operation, 2, model, data); // 1D
+    if (!biasInput.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    biasInput.SanitizeQuantizationScale(weightsInput, input);
+    armnn::TensorInfo weightsInfo = weightsInput.GetTensorInfo();
+    armnn::TensorInfo biasInfo = biasInput.GetTensorInfo();
 
     ActivationFn activation;
 
@@ -419,8 +449,8 @@ bool ConvertConv2d_1_2(const HalOperation& operation, const HalModel& model, ConversionData& data)
     armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
     unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
     unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
-    const uint32_t kernelX = weights.GetShape()[widthIndex];
-    const uint32_t kernelY = weights.GetShape()[heightIndex];
+    const uint32_t kernelX = weightsInfo.GetShape()[widthIndex];
+    const uint32_t kernelY = weightsInfo.GetShape()[heightIndex];
     const uint32_t inputX = inputInfo.GetShape()[widthIndex];
     const uint32_t inputY = inputInfo.GetShape()[heightIndex];
 
@@ -449,7 +479,7 @@ bool ConvertConv2d_1_2(const HalOperation& operation, const HalModel& model, ConversionData& data)
     }
 
     desc.m_BiasEnabled = true;
-    Optional<TensorInfo> biases(bias.GetInfo());
+    Optional<TensorInfo> biases(biasInfo);
 
     bool isSupported = false;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
@@ -461,7 +491,7 @@ bool ConvertConv2d_1_2(const HalOperation& operation, const HalModel& model, ConversionData& data)
                                    inputInfo,
                                    outputInfo,
                                    desc,
-                                   weights.GetInfo(),
+                                   weightsInfo,
                                    biases);
     };
 
@@ -479,15 +509,16 @@ bool ConvertConv2d_1_2(const HalOperation& operation, const HalModel& model, ConversionData& data)
         return false;
     }
 
-    IConnectableLayer* startLayer =
-        data.m_Network->AddConvolution2dLayer(desc, weights, Optional<ConstTensor>(bias));
+    armnn::IConnectableLayer* startLayer = data.m_Network->AddConvolution2dLayer(desc);
 
     if (!startLayer)
     {
         return Fail("%s: AddConvolution2dLayer failed", __func__);
     }
 
     input.Connect(startLayer->GetInputSlot(0));
+    weightsInput.Connect(startLayer->GetInputSlot(1));
+    biasInput.Connect(startLayer->GetInputSlot(2));
 
     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
                                                    data, nullptr, validateFunc, activation);
@@ -1202,8 +1233,11 @@ bool ConvertGroupedConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
             return false;
         }
 
+        ARMNN_NO_DEPRECATE_WARN_BEGIN
         IConnectableLayer* convLayer =
             data.m_Network->AddConvolution2dLayer(desc, groupWeights, Optional<ConstTensor>(groupBiases));
+        ARMNN_NO_DEPRECATE_WARN_END
+
         if (!convLayer)
         {
             return Fail("%s: AddConvolution2dLayer failed", __func__);
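On the NCHW path in ConvertConv2d_1_2, the weights handle is created with the OHWIToOIHW mapping. In an armnn::PermutationVector, element i names the destination dimension of source dimension i, so the mapping is conventionally defined as below (a sketch, not quoted from this patch):

// O stays at 0, H moves to 2, W to 3, I to 1:
const armnn::PermutationVector OHWIToOIHW({ 0U, 2U, 3U, 1U });

// Example: an OHWI filter shaped { 32, 3, 3, 16 }
// (depth_out, height, width, depth_in) permutes to OIHW { 32, 16, 3, 3 }.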
