Skip to content

Commit

Permalink
修改layer.forward逻辑 (Refactor layer.forward logic: move the input/output operand preparation out of RuntimeGraph::Forward into Layer::Forward)
Browse files Browse the repository at this point in the history
  • Loading branch information
zjhellofss committed Mar 13, 2023
1 parent 74dc1d7 commit e9f5570
Show file tree
Hide file tree
Showing 3 changed files with 76 additions and 51 deletions.
51 changes: 31 additions & 20 deletions include/layer/abstract/layer.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -4,23 +4,20 @@

#ifndef KUIPER_INFER_SOURCE_LAYER_LAYER_HPP_
#define KUIPER_INFER_SOURCE_LAYER_LAYER_HPP_
#include <glog/logging.h>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <memory>
#include <glog/logging.h>

#include "status_code.hpp"
#include "data/tensor.hpp"
#include "runtime/runtime_op.hpp"
#include "status_code.hpp"

namespace kuiper_infer {

class RuntimeOperator;
class Layer {
public:
explicit Layer(std::string layer_name) : layer_name_(std::move(layer_name)) {

}
explicit Layer(std::string layer_name) : layer_name_(std::move(layer_name)) {}

virtual ~Layer() = default;

Expand All @@ -30,50 +27,64 @@ class Layer {
* @param outputs 层的输出
* @return 执行的状态
*/
virtual InferStatus Forward(const std::vector<std::shared_ptr<Tensor<float>>> &inputs,
std::vector<std::shared_ptr<Tensor<float>>> &outputs);
virtual InferStatus Forward(
const std::vector<std::shared_ptr<Tensor<float>>>& inputs,
std::vector<std::shared_ptr<Tensor<float>>>& outputs);

/**
* Layer的执行函数
* @param current_operator 当前的operator
* @return 执行的状态
*/
virtual InferStatus Forward();

/**
* 返回层的权重
* @return 返回的权重
*/
virtual const std::vector<std::shared_ptr<Tensor<float>>> &weights() const;
virtual const std::vector<std::shared_ptr<Tensor<float>>>& weights() const;

/**
* 返回层的偏移量
* @return 返回的偏移量
*/
virtual const std::vector<std::shared_ptr<Tensor<float>>> &bias() const;
virtual const std::vector<std::shared_ptr<Tensor<float>>>& bias() const;

virtual void set_weights(const std::vector<std::shared_ptr<Tensor<float>>> &weights);
virtual void set_weights(
const std::vector<std::shared_ptr<Tensor<float>>>& weights);

/**
* 设置Layer的偏移量
* @param bias 偏移量
*/
virtual void set_bias(const std::vector<std::shared_ptr<Tensor<float>>> &bias);
virtual void set_bias(
const std::vector<std::shared_ptr<Tensor<float>>>& bias);

/**
* 设置Layer的权重
* @param weights 权重
*/
virtual void set_weights(const std::vector<float> &weights);
virtual void set_weights(const std::vector<float>& weights);

/**
* 设置Layer的偏移量
* @param bias 偏移量
*/
virtual void set_bias(const std::vector<float> &bias);
virtual void set_bias(const std::vector<float>& bias);

/**
* 返回层的名称
* @return 层的名称
*/
virtual const std::string &layer_name() const { return this->layer_name_; }
virtual const std::string& layer_name() const { return this->layer_name_; }

void set_runtime_operator(
const std::shared_ptr<RuntimeOperator>& runtime_operator);

protected:
std::string layer_name_; /// Layer的名称
std::weak_ptr<RuntimeOperator> runtime_operator_;
std::string layer_name_; /// Layer的名称
};

}
#endif //KUIPER_INFER_SOURCE_LAYER_LAYER_HPP_
} // namespace kuiper_infer
#endif // KUIPER_INFER_SOURCE_LAYER_LAYER_HPP_
53 changes: 43 additions & 10 deletions source/layer/abstract/layer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -4,34 +4,67 @@
#include "layer/abstract/layer.hpp"
namespace kuiper_infer {

const std::vector<std::shared_ptr<Tensor<float>>> &Layer::weights() const {
const std::vector<std::shared_ptr<Tensor<float>>>& Layer::weights() const {
LOG(FATAL) << this->layer_name_ << " layer not implement yet!";
}

const std::vector<std::shared_ptr<Tensor<float>>> &Layer::bias() const {
const std::vector<std::shared_ptr<Tensor<float>>>& Layer::bias() const {
LOG(FATAL) << this->layer_name_ << " layer not implement yet!";
}

void Layer::set_bias(const std::vector<float> &bias) {
void Layer::set_bias(const std::vector<float>& bias) {
LOG(FATAL) << this->layer_name_ << " layer not implement yet!";
}

void Layer::set_bias(const std::vector<std::shared_ptr<Tensor<float>>> &bias) {
void Layer::set_bias(const std::vector<std::shared_ptr<Tensor<float>>>& bias) {
LOG(FATAL) << this->layer_name_ << " layer not implement yet!";
}

void Layer::set_weights(const std::vector<float> &weights) {
void Layer::set_weights(const std::vector<float>& weights) {
LOG(FATAL) << this->layer_name_ << " layer not implement yet!";
}

void Layer::set_weights(const std::vector<std::shared_ptr<Tensor<float>>> &weights) {
void Layer::set_weights(
const std::vector<std::shared_ptr<Tensor<float>>>& weights) {
LOG(FATAL) << this->layer_name_ << " layer not implement yet!";
}


/// Base implementation of the explicit-operand Forward. Concrete layers must
/// override it, so reaching this is a fatal configuration error.
InferStatus Layer::Forward(
    const std::vector<std::shared_ptr<Tensor<float>>>& inputs,
    std::vector<std::shared_ptr<Tensor<float>>>& outputs) {
  LOG(FATAL) << this->layer_name_ << " layer not implement yet!";
}

}
/// Executes this layer's computation using the operands of the bound runtime
/// operator: gathers all input tensors from the operator's input operand
/// sequence and forwards them into the operator's output operand storage.
/// @return status returned by the explicit-operand Forward overload
InferStatus Layer::Forward() {
  // Lock the weak_ptr once and test the result. Calling expired() first and
  // lock() afterwards (as two steps) is a TOCTOU race: the owner could be
  // released in between, leaving a null pointer that is dereferenced below.
  const std::shared_ptr<RuntimeOperator> runtime_operator_sp =
      this->runtime_operator_.lock();
  LOG_IF(FATAL, runtime_operator_sp == nullptr)
      << "Runtime operator is expired or nullptr";

  // Gather the inputs for this layer's computation, preserving the operand
  // sequence order.
  const std::vector<std::shared_ptr<RuntimeOperand>>& input_operand_datas =
      runtime_operator_sp->input_operands_seq;
  std::vector<std::shared_ptr<Tensor<float>>> layer_input_datas;
  for (const auto& input_operand_data : input_operand_datas) {
    for (const auto& input_data : input_operand_data->datas) {
      layer_input_datas.push_back(input_data);
    }
  }

  CHECK(!layer_input_datas.empty())
      << runtime_operator_sp->name << " Layer input data is empty";
  CHECK(runtime_operator_sp->output_operands != nullptr &&
        !runtime_operator_sp->output_operands->datas.empty())
      << "Layer output data is empty";

  // Run the operator's layer; results are written into
  // runtime_operator_sp->output_operands->datas.
  InferStatus status = runtime_operator_sp->layer->Forward(
      layer_input_datas, runtime_operator_sp->output_operands->datas);
  return status;
}

void Layer::set_runtime_operator(
const std::shared_ptr<RuntimeOperator>& runtime_operator) {
this->runtime_operator_ = runtime_operator;
}

} // namespace kuiper_infer
23 changes: 2 additions & 21 deletions source/runtime/runtime_ir.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -132,6 +132,7 @@ void RuntimeGraph::Build(const std::string& input_name,
CHECK(layer != nullptr) << "Layer create failed!";
if (layer) {
kOperator->layer = layer;
layer->set_runtime_operator(kOperator);
}
}
}
Expand Down Expand Up @@ -175,7 +176,6 @@ std::vector<std::shared_ptr<Tensor<float>>> RuntimeGraph::Forward(
output_op = output_operators_maps_.at(output_name_);
}


// 输入和输出算子一般唯一
// 执行队列中添加输入算子
std::deque<std::shared_ptr<RuntimeOperator>> operator_queue;
Expand Down Expand Up @@ -214,28 +214,9 @@ std::vector<std::shared_ptr<Tensor<float>>> RuntimeGraph::Forward(
std::string current_op_name = current_op->name;
CHECK_EQ(CheckOperatorReady(current_op), true)
<< "Current operator " << current_op->name << " is not ready!";
// 准备节点layer计算所需要的输入
const std::vector<std::shared_ptr<RuntimeOperand>>& input_operand_datas =
current_op->input_operands_seq;
// layer的输入
std::vector<std::shared_ptr<Tensor<float>>> layer_input_datas;
for (const auto& input_operand_data : input_operand_datas) {
for (const auto& input_data : input_operand_data->datas) {
layer_input_datas.push_back(input_data);
}
}

CHECK(!layer_input_datas.empty())
<< current_op->name << " Layer input data is empty";
CHECK(current_op->output_operands != nullptr &&
!current_op->output_operands->datas.empty())
<< "Layer output data is empty";

const auto& start = std::chrono::steady_clock::now();
// 执行operator当中的layer计算过程
// layer的计算结果存放在current_op->output_operands->datas中
InferStatus status = current_op->layer->Forward(
layer_input_datas, current_op->output_operands->datas);
InferStatus status = current_op->layer->Forward();

CHECK(status == InferStatus::kInferSuccess)
<< current_op->layer->layer_name()
Expand Down

0 comments on commit e9f5570

Please sign in to comment.