IDF release/v4.4 #6048

Merged · 2 commits · Dec 21, 2021
6 changes: 3 additions & 3 deletions platform.txt

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion tools/platformio-build-esp32.py
@@ -303,7 +303,7 @@
"UNITY_INCLUDE_CONFIG_H",
"WITH_POSIX",
"_GNU_SOURCE",
("IDF_VER", '\\"v4.4-beta1-183-gf23dcd3555\\"'),
("IDF_VER", '\\"v4.4-beta1-189-ga79dc75f0a\\"'),
"ESP_PLATFORM",
"_POSIX_READER_WRITER_LOCKS",
"ARDUINO_ARCH_ESP32",
2 changes: 1 addition & 1 deletion tools/platformio-build-esp32c3.py
@@ -293,7 +293,7 @@
"UNITY_INCLUDE_CONFIG_H",
"WITH_POSIX",
"_GNU_SOURCE",
("IDF_VER", '\\"v4.4-beta1-183-gf23dcd3555\\"'),
("IDF_VER", '\\"v4.4-beta1-189-ga79dc75f0a\\"'),
"ESP_PLATFORM",
"_POSIX_READER_WRITER_LOCKS",
"ARDUINO_ARCH_ESP32",
2 changes: 1 addition & 1 deletion tools/platformio-build-esp32s2.py
@@ -290,7 +290,7 @@
"UNITY_INCLUDE_CONFIG_H",
"WITH_POSIX",
"_GNU_SOURCE",
("IDF_VER", '\\"v4.4-beta1-183-gf23dcd3555\\"'),
("IDF_VER", '\\"v4.4-beta1-189-ga79dc75f0a\\"'),
"ESP_PLATFORM",
"_POSIX_READER_WRITER_LOCKS",
"ARDUINO_ARCH_ESP32",
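All three PlatformIO build scripts above get the same one-line change: the IDF_VER define moves from v4.4-beta1-183-gf23dcd3555 to v4.4-beta1-189-ga79dc75f0a. A minimal sketch for checking which version a build actually carries, assuming a standard Arduino-ESP32 setup (not part of this PR):

#include <Arduino.h>
#include <esp_system.h> // esp_get_idf_version()

void setup() {
    Serial.begin(115200);
    Serial.println(IDF_VER);               // compile-time define injected by the build scripts
    Serial.println(esp_get_idf_version()); // runtime report of the linked IDF
}

void loop() {}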
6 changes: 4 additions & 2 deletions tools/sdk/esp32/include/config/sdkconfig.h
@@ -406,7 +406,9 @@
#define CONFIG_LWIP_IPV6_ND6_NUM_NEIGHBORS 5
#define CONFIG_LWIP_ICMP 1
#define CONFIG_LWIP_MAX_RAW_PCBS 16
-#define CONFIG_LWIP_SNTP_MAX_SERVERS 1
+#define CONFIG_LWIP_SNTP_MAX_SERVERS 3
+#define CONFIG_LWIP_DHCP_GET_NTP_SRV 1
+#define CONFIG_LWIP_DHCP_MAX_NTP_SERVERS 1
#define CONFIG_LWIP_SNTP_UPDATE_DELAY 3600000
#define CONFIG_LWIP_ESP_LWIP_ASSERT 1
#define CONFIG_LWIP_HOOK_TCP_ISN_DEFAULT 1
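This sdkconfig change raises the SNTP server limit from 1 to 3 and enables picking up an NTP server via DHCP. A minimal usage sketch against the esp_sntp API shipped with ESP-IDF (server hostnames are placeholders, not part of this PR):

#include "esp_sntp.h"

void start_time_sync(void) {
    sntp_setoperatingmode(SNTP_OPMODE_POLL);
    // Up to CONFIG_LWIP_SNTP_MAX_SERVERS (now 3) servers may be registered.
    sntp_setservername(0, "pool.ntp.org");
    sntp_setservername(1, "time.google.com");
    sntp_setservername(2, "time.cloudflare.com");
    // With CONFIG_LWIP_DHCP_GET_NTP_SRV=1, a DHCP-offered NTP server can
    // additionally fill one slot (CONFIG_LWIP_DHCP_MAX_NTP_SERVERS=1).
    sntp_init();
}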
@@ -677,5 +679,5 @@
#define CONFIG_ULP_COPROC_RESERVE_MEM CONFIG_ESP32_ULP_COPROC_RESERVE_MEM
#define CONFIG_WARN_WRITE_STRINGS CONFIG_COMPILER_WARN_WRITE_STRINGS
#define CONFIG_WIFI_LWIP_ALLOCATION_FROM_SPIRAM_FIRST CONFIG_SPIRAM_TRY_ALLOCATE_WIFI_LWIP
-#define CONFIG_ARDUINO_IDF_COMMIT "f23dcd3555"
+#define CONFIG_ARDUINO_IDF_COMMIT "a79dc75f0a"
#define CONFIG_ARDUINO_IDF_BRANCH "release/v4.4"
@@ -66,19 +66,18 @@ namespace dl
this->output_exponent = input.exponent;
if (!this->inplace)
{
-if (this->output != NULL)
+if (this->output == NULL)
{
this->output = new Tensor<feature_t>;
}
this->output->set_exponent(this->output_exponent);
-this->output->set_shape(this->output_shape);
+this->output->set_shape(input.shape);
this->output->expand_dims(this->axis);
this->output->free_element();
}
else
{
this->output = &input;
-this->output->set_shape(this->output_shape);
this->output->expand_dims(this->axis);
}
this->output_shape = this->output->shape;
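The `!= NULL` → `== NULL` change recurring through these layer headers fixes an inverted lazy-allocation guard: the old test allocated a fresh Tensor only when output was already non-NULL, so the first call dereferenced a null pointer and later calls leaked. A self-contained illustration of the corrected pattern, with a hypothetical Buffer standing in for Tensor<feature_t>:

#include <cstdio>

struct Buffer { int size = 0; };

Buffer *output = nullptr;

void build(int size) {
    if (output == nullptr) {   // corrected guard: allocate on first use only
        output = new Buffer;
    }
    output->size = size;       // later calls reuse the same allocation
}

int main() {
    build(8);
    build(16);
    printf("size=%d\n", output->size);  // prints size=16
    delete output;
    return 0;
}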
@@ -59,7 +59,7 @@ namespace dl
this->output_shape = {input.get_size()};
if (!this->inplace)
{
-if (this->output != NULL)
+if (this->output == NULL)
{
this->output = new Tensor<feature_t>;
}
@@ -10,14 +10,14 @@ namespace dl
namespace layer
{
/**
-* @brief LeakyReLU(input).
+* @brief LeakyRelu(input).
*
* @tparam feature_t supports int16_t and int8_t,
* - int16_t: stands for operation in int16_t quantize
* - int8_t: stands for operation in int8_t quantize
*/
template <typename feature_t>
-class LeakyReLU : public Layer
+class LeakyRelu : public Layer
{
private:
feature_t activation_alpha; /*<! quantized alpha >*/
@@ -28,26 +28,26 @@ namespace dl
std::vector<int> output_shape; /*<! output shape of leakyrelu >*/
public:
/**
-* @brief Construct a new LeakyReLU object
+* @brief Construct a new LeakyRelu object
*
* @param activation_alpha quantized alpha
* @param activation_exponent exponent of quantized alpha
* @param name name of leakyrelu
* @param inplace true: the output will store to input0
* false: the output will store to a separate memory
*/
-LeakyReLU(const int activation_alpha, const int activation_exponent, const char *name = "LeakyReLU", bool inplace = false) : Layer(name), output(NULL), output_shape({})
+LeakyRelu(const int activation_alpha, const int activation_exponent, const char *name = "LeakyRelu", bool inplace = false) : Layer(name), output(NULL), output_shape({})
{
this->activation_alpha = activation_alpha;
this->activation_exponent = activation_exponent;
this->inplace = inplace;
}

/**
-* @brief Destroy the LeakyReLU object
+* @brief Destroy the LeakyRelu object
*
*/
-~LeakyReLU()
+~LeakyRelu()
{
if ((!this->inplace) && (this->output != NULL))
{
@@ -66,7 +66,7 @@ namespace dl
this->output_shape = input.shape;
if (!this->inplace)
{
-if (this->output != NULL)
+if (this->output == NULL)
{
this->output = new Tensor<feature_t>;
}
@@ -90,19 +90,19 @@ namespace dl
/**
* @brief Get the output
*
-* @return Tensor<feature_t>& LeakyReLU result
+* @return Tensor<feature_t>& LeakyRelu result
*/
Tensor<feature_t> &get_output()
{
return *this->output;
}

/**
-* @brief Call LeakyReLU operation.
+* @brief Call LeakyRelu operation.
*
* @param input as an input
* @param assign_core not effective yet
-* @return LeakyReLU result
+* @return LeakyRelu result
*/
Tensor<feature_t> &call(Tensor<feature_t> &input, const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
{
@@ -130,7 +130,7 @@ namespace dl
{
this->output->set_shape(this->output_shape);
}
-nn::leakyrelu<true>(*this->output, input, this->activation_alpha, this->activation_exponent, assign_core);
+nn::leakyrelu(*this->output, input, this->activation_alpha, this->activation_exponent, assign_core);
DL_LOG_LAYER_LATENCY_END(this->name, "leakyrelu");
}

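Besides the LeakyReLU → LeakyRelu rename, the call site drops the explicit <true> template argument from nn::leakyrelu; the same change recurs in max2d, min2d, and mul2d below, suggesting the updated esp-face kernels no longer take that template parameter. A simplified, self-contained model of the new call shape (the float kernel here is illustrative, not the real fixed-point implementation):

#include <cstdio>

namespace nn {
    // old signature (sketch): template <bool inplace> void leakyrelu(...);
    // new signature (sketch): a plain function, so call sites lose the <true>
    void leakyrelu(float *out, const float *in, int n, float alpha) {
        for (int i = 0; i < n; ++i)
            out[i] = in[i] > 0 ? in[i] : alpha * in[i];
    }
}

int main() {
    float x[4] = {-2.0f, -0.5f, 0.5f, 2.0f};
    nn::leakyrelu(x, x, 4, 0.1f);        // in-place is now just out == in
    for (float v : x) printf("%g ", v);  // -0.2 -0.05 0.5 2
    printf("\n");
    return 0;
}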
@@ -68,7 +68,7 @@ namespace dl

if (!this->inplace)
{
-if (this->output != NULL)
+if (this->output == NULL)
{
this->output = new Tensor<feature_t>;
}
@@ -132,7 +132,7 @@ namespace dl
{
this->output->set_shape(this->output_shape);
}
-nn::max2d<true>(*this->output, input0, input1, assign_core);
+nn::max2d(*this->output, input0, input1, assign_core);
DL_LOG_LAYER_LATENCY_END(this->name, "max2d");
}

@@ -68,7 +68,7 @@ namespace dl

if (!this->inplace)
{
-if (this->output != NULL)
+if (this->output == NULL)
{
this->output = new Tensor<feature_t>;
}
@@ -132,7 +132,7 @@ namespace dl
{
this->output->set_shape(this->output_shape);
}
-nn::min2d<true>(*this->output, input0, input1, assign_core);
+nn::min2d(*this->output, input0, input1, assign_core);
DL_LOG_LAYER_LATENCY_END(this->name, "min2d");
}

@@ -75,7 +75,7 @@ namespace dl

if (!this->inplace)
{
-if (this->output != NULL)
+if (this->output == NULL)
{
this->output = new Tensor<feature_t>;
}
@@ -140,7 +140,7 @@ namespace dl
{
this->output->set_shape(this->output_shape);
}
-nn::mul2d<true>(*this->output, input0, input1, this->activation, assign_core);
+nn::mul2d(*this->output, input0, input1, this->activation, assign_core);
DL_LOG_LAYER_LATENCY_END(this->name, "mul2d");
}

30 changes: 15 additions & 15 deletions tools/sdk/esp32/include/esp-face/include/layer/dl_layer_prelu.hpp
@@ -10,36 +10,36 @@ namespace dl
namespace layer
{
/**
-* @brief PReLU(input).
+* @brief PRelu(input).
*
* @tparam feature_t supports int16_t and int8_t,
* - int16_t: stands for operation in int16_t quantize
* - int8_t: stands for operation in int8_t quantize
*/
template <typename feature_t>
-class PReLU : public Layer
+class PRelu : public Layer
{
private:
-feature_t *activation_element; /*<! quantized alpha elements along channel axis >*/
+const feature_t *activation_element; /*<! quantized alpha elements along channel axis >*/
int activation_exponent; /*<! exponent of quantized alpha elements >*/
Tensor<feature_t> *output; /*<! output ptr of prelu >*/
bool inplace; /*<! true: the output will store to input0
false: the output will store to a separate memory >*/
std::vector<int> output_shape; /*<! output shape of prelu >*/
public:
/**
-* @brief Construct a new PReLU object
+* @brief Construct a new PRelu object
*
* @param activation_element quantized alpha elements along channel axis
* @param activation_exponent exponent of quantized alpha elements
* @param name name of prelu
* @param inplace true: the output will store to input0
* false: the output will store to a separate memory
*/
-PReLU(const feature_t *activation_element,
+PRelu(const feature_t *activation_element,
const int activation_exponent = 0,
-const char *name = NULL,
-bool inplace = "PReLU") : Layer(name),
+const char *name = "PRelu",
+bool inplace = false) : Layer(name),
activation_element(activation_element),
activation_exponent(activation_exponent),
output(NULL),
@@ -49,10 +49,10 @@ namespace dl
}

/**
-* @brief Destroy the PReLU object
+* @brief Destroy the PRelu object
*
*/
-~PReLU()
+~PRelu()
{
if ((!this->inplace) && (this->output != NULL))
{
@@ -71,7 +71,7 @@ namespace dl
this->output_shape = input.shape;
if (!this->inplace)
{
-if (this->output != NULL)
+if (this->output == NULL)
{
this->output = new Tensor<feature_t>;
}
@@ -94,19 +94,19 @@ namespace dl
/**
* @brief Get the output
*
-* @return Tensor<feature_t>& PReLU result
+* @return Tensor<feature_t>& PRelu result
*/
Tensor<feature_t> &get_output()
{
return *this->output;
}

/**
-* @brief Call PReLU operation.
+* @brief Call PRelu operation.
*
* @param input as an input
* @param assign_core not effective yet
-* @return PReLU result
+* @return PRelu result
*/
Tensor<feature_t> &call(Tensor<feature_t> &input, const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
{
@@ -125,7 +125,7 @@ namespace dl

DL_LOG_LAYER_LATENCY_START();
nn::prelu(*this->output, input, this->activation_element, this->activation_exponent, assign_core);
DL_LOG_LAYER_LATENCY_END(this->name, "leakyrelu");
DL_LOG_LAYER_LATENCY_END(this->name, "prelu");
}
else
{
@@ -135,7 +135,7 @@ namespace dl
this->output->set_shape(this->output_shape);
}
nn::prelu(*this->output, input, this->activation_element, this->activation_exponent, assign_core);
DL_LOG_LAYER_LATENCY_END(this->name, "leakyrelu");
DL_LOG_LAYER_LATENCY_END(this->name, "prelu");
}

return *this->output;
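Beyond the rename, the PRelu constructor fix above deserves a note: the old defaults were transposed, name = NULL and inplace = "PReLU", and a string literal converts to a non-null pointer, so inplace silently defaulted to true. A self-contained illustration of that bug class, using a simplified signature:

#include <cstdio>

struct PRelu {
    const char *name;
    bool inplace;
    // Old, buggy defaults (sketch): PRelu(const char *name = NULL, bool inplace = "PReLU")
    // The literal "PReLU" is a non-null pointer, so inplace became true.
    PRelu(const char *name = "PRelu", bool inplace = false)
        : name(name), inplace(inplace) {}
};

int main() {
    PRelu p;  // with the old defaults: p.name == NULL and p.inplace == true
    printf("name=%s inplace=%d\n", p.name, p.inplace);  // name=PRelu inplace=0
    return 0;
}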
@@ -61,7 +61,7 @@ namespace dl
this->output_shape = input.shape;
if (!this->inplace)
{
-if (this->output != NULL)
+if (this->output == NULL)
{
this->output = new Tensor<feature_t>;
}
@@ -64,19 +64,21 @@ namespace dl
this->output_exponent = input.exponent;
if (!this->inplace)
{
-if (this->output != NULL)
+if (this->output == NULL)
{
this->output = new Tensor<feature_t>;
}
this->output->set_exponent(this->output_exponent);
-this->output->set_shape(this->output_shape);
+this->output->set_shape(input.shape);
+this->output->reshape(this->output_shape);
this->output->free_element();
}
else
{
this->output = &input;
-this->output->set_shape(this->output_shape);
+this->output->reshape(this->output_shape);
}
+this->output_shape = this->output->shape;

if (print_shape)
{
@@ -66,7 +66,7 @@ namespace dl
this->output_exponent = input.exponent;
if (!this->inplace)
{
-if (this->output != NULL)
+if (this->output == NULL)
{
this->output = new Tensor<feature_t>;
}
@@ -78,7 +78,6 @@ namespace dl
else
{
this->output = &input;
-this->output->set_shape(input.shape);
this->output->squeeze(this->axis);
}
this->output_shape = this->output->shape;