Skip to content

Commit

Permalink
[onert-micro] Bring up SparseCrossEntropy Loss Function
Browse files Browse the repository at this point in the history
This commit adds new loss function "SparseCrossEntropy"
Since this loss function takes a class-index target rather than a one-hot-encoded vector, the target-data offset calculation was adjusted with a loss-type condition.

Signed-off-by: Jungwoo Lee <[email protected]>
  • Loading branch information
ljwoo94 committed Jul 26, 2024
1 parent 8f37c80 commit 99d57ef
Show file tree
Hide file tree
Showing 5 changed files with 105 additions and 1 deletion.
1 change: 1 addition & 0 deletions onert-micro/onert-micro/include/OMConfig.h
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,7 @@ enum OMLoss
CROSS_ENTROPY,
MSE,
MAE,
SPARSE_CROSS_ENTROPY,
};

/*
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
/*
* Copyright (c) 2024 Samsung Electronics Co., Ltd. All Rights Reserved
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef ONERT_MICRO_TRAIN_LOSSES_FUNCTIONS_SPARSE_CROSS_ENTROPY_H
#define ONERT_MICRO_TRAIN_LOSSES_FUNCTIONS_SPARSE_CROSS_ENTROPY_H

#include "OMStatus.h"

#include <cstdint>

namespace onert_micro
{
namespace train
{
namespace losses_functions
{

// Sparse Cross Entropy loss.
// Unlike CrossEntropy, the target is a single class index per sample
// (target_data[0] holds the label), not a one-hot vector.
struct SparseCrossEntropy
{
// Calculate sparse cross entropy error backpropagation between calculated and target data.
// flat_size       - number of classes (length of calculated_data and output_grad)
// calculated_data - predicted class probabilities (presumably softmax output — confirm at call site)
// target_data     - target_data[0] is the target class index stored as a float
// output_grad     - output: gradient of the loss w.r.t. each logit
static void calculateErrorBackpropagation(const uint32_t flat_size, const float *calculated_data,
                                          const float *target_data, float *output_grad);
};

} // namespace losses_functions
} // namespace train
} // namespace onert_micro

#endif // ONERT_MICRO_TRAIN_LOSSES_FUNCTIONS_SPARSE_CROSS_ENTROPY_H
17 changes: 16 additions & 1 deletion onert-micro/onert-micro/src/core/train/OMTrainingHandler.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@
#include "core/train/OMTrainingHandler.h"
#include "train/losses_functions/MSE.h"
#include "train/losses_functions/CrossEntropy.h"
#include "train/losses_functions/SparseCrossEntropy.h"
#include "train/metrics/MSE.h"
#include "train/metrics/CrossEntropy.h"
#include "train/metrics/Accuracy.h"
Expand Down Expand Up @@ -56,11 +57,18 @@ OMStatus OMTrainingHandler::handleError(const OMConfig &config, OMRuntimeStorage
OMStatus status = forward_storage.getDataByTensorIndex(&calculated_data, forward_output_index);
assert(calculated_data != nullptr);

OMLoss loss_type = config.training_context.loss;

// Get target data
auto data_type_size = sizeof(core::OMDataType(forward_output_tensor->type()));
size_t offset = batch_num * data_type_size * flat_size;

// Need to check loss type to control proper offset.
if (loss_type == SPARSE_CROSS_ENTROPY)
{
offset = batch_num * data_type_size;
}
uint8_t *target_data = _training_storage.getTargetData(i) + offset;
OMLoss loss_type = config.training_context.loss;

// Allocate data for error gradient for current calculated data and target data
uint8_t *output_grad_data;
Expand All @@ -85,6 +93,13 @@ OMStatus OMTrainingHandler::handleError(const OMConfig &config, OMRuntimeStorage
reinterpret_cast<float *>(target_data), reinterpret_cast<float *>(output_grad_data));
break;
}
case SPARSE_CROSS_ENTROPY:
{
losses_functions::SparseCrossEntropy::calculateErrorBackpropagation(
flat_size, reinterpret_cast<float *>(calculated_data),
reinterpret_cast<float *>(target_data), reinterpret_cast<float *>(output_grad_data));
break;
}
default:
{
assert(false && "Unsupported loss type");
Expand Down
1 change: 1 addition & 0 deletions onert-micro/onert-micro/src/train/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ set(SOURCES
OMBackpropExecutionBuilder.cpp
losses_functions/MSE.cpp
losses_functions/CrossEntropy.cpp
losses_functions/SparseCrossEntropy.cpp
metrics/CrossEntropy.cpp
metrics/MAE.cpp
metrics/MSE.cpp
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
/*
* Copyright (c) 2024 Samsung Electronics Co., Ltd. All Rights Reserved
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "train/losses_functions/SparseCrossEntropy.h"
#include <cmath>

using namespace onert_micro;
using namespace onert_micro::train;
using namespace onert_micro::train::losses_functions;

/*
 * Gradient of the sparse cross-entropy loss w.r.t. the logits Z.
 *
 * dE/dZ_i = (dE/dy) * (dy/dZ_i)
 * where Z is the vector of logits and y the predicted probability of
 * the target class (p_i = softmax output for class i, t = target index):
 *
 *   (true label)  i == t : dE/dZ_i = p_i - 1
 *   (wrong label) i != t : dE/dZ_i = p_i
 */
void SparseCrossEntropy::calculateErrorBackpropagation(const uint32_t flat_size,
                                                       const float *calculated_data,
                                                       const float *target_data, float *output_grad)
{
  // The target is a class index (not a one-hot vector): target_data[0] holds the label.
  const uint32_t label = static_cast<uint32_t>(target_data[0]);

  for (uint32_t i = 0; i < flat_size; ++i)
  {
    // p_i - 1 for the target class, p_i otherwise.
    // NOTE: the previous +1e-31 epsilon was a leftover from the forward-pass
    // log() guard; it is absorbed by float rounding for any realistic p_i and
    // serves no purpose in the gradient, so it was removed.
    output_grad[i] = calculated_data[i] - static_cast<float>(i == label);
  }
}

0 comments on commit 99d57ef

Please sign in to comment.