This repository has been archived by the owner on Dec 1, 2021. It is now read-only.

use byte arrays to remove inexact floating point number literals #1079

Open · wants to merge 6 commits into base: master
2 changes: 1 addition & 1 deletion blueoil/converter/core/optimizer.py
@@ -437,7 +437,7 @@ def pad_to_multiple_of_b(tensor, axis, b):
quantized_constant = Constant(
    weight_quantizer.name + '_new',
    PackedUint32(),
-    data=np.vectorize(lambda k: (~k) & ((0x1 << 32) - 1))(data),
+    data=np.vectorize(lambda k: (~k) & ((0x1 << 32) - 1))(data).astype(np.uint32),
    dimension_format="OHWI",
    transposed_dimension_format="OhIhHWOlIl",
    packed=True,
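A minimal sketch of why the explicit cast matters (not part of the diff; the input dtype below is assumed, and the exact dtype NumPy returns depends on its version): np.vectorize can hand back a wider integer dtype than the PackedUint32 constant declares, and the templates further down serialize each element byte by byte, so every element must be exactly 4 bytes wide.

import numpy as np

# Assumed input: inverted kernel words arriving as 64-bit integers.
data = np.array([0x0F, 0xFFFFFFF0], dtype=np.int64)
masked = np.vectorize(lambda k: (~k) & ((0x1 << 32) - 1))(data)

print(masked.dtype)                      # typically int64: 8 bytes per element
print(masked.astype(np.uint32).dtype)    # uint32: the 4-byte layout PackedUint32 expects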
20 changes: 20 additions & 0 deletions blueoil/converter/template.py
@@ -14,9 +14,28 @@
# limitations under the License.
# =============================================================================
from os import path
import struct

from jinja2 import Environment as JinjaEnv
from jinja2 import FileSystemLoader
import numpy as np


def pack_to_bytes(a):
    t = type(a)
    if t == float or t == np.float32:
        packed_binary = struct.pack("<f", a)
    elif t == int or t == np.int32:
        packed_binary = struct.pack("<i", a)
    elif t == np.int64:
        packed_binary = struct.pack("<q", a)
    elif t == np.uint32:
        packed_binary = struct.pack("<I", a)

    if t == np.int64:
        return ",".join(map(str, list(struct.unpack("BBBBBBBB", packed_binary))))
    else:
        return ",".join(map(str, list(struct.unpack("BBBB", packed_binary))))


class Template(object):
@@ -59,5 +78,6 @@ def _save_string_as_file(self, string, file_path):
    def _create_jinja(self):
        loader = FileSystemLoader(self.root_dir, encoding='utf8')
        jinja = JinjaEnv(loader=loader)
        jinja.globals['pack_to_bytes'] = pack_to_bytes

        return jinja
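A minimal usage sketch of the helper above (not part of the diff; the values are made up): pack_to_bytes returns the little-endian bytes of a scalar as a comma-separated string, so a float32 constant is reproduced bit for bit in the generated source instead of being re-parsed from a decimal literal by the C++ compiler.

import numpy as np
from blueoil.converter.template import pack_to_bytes

print(pack_to_bytes(np.float32(0.1)))   # "205,204,204,61" -> bit pattern 0x3DCCCCCD
print(pack_to_bytes(np.uint32(7)))      # "7,0,0,0" (little-endian, 4 bytes)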
15 changes: 9 additions & 6 deletions blueoil/converter/templates/manual/consts/input.tpl.cpp
@@ -19,6 +19,7 @@ limitations under the License.

{% if node.transposed_data %}

+/* TODO(tokunaga): stop using floating point literal, use opaque unsigned char representation instead. */
#ifdef RUN_ON_FPGA
static Base<{{ node.dtype.cpptype() }}>::type {{ node.name }}_raw[] = {
{% for d in node.transposed_data -%}
@@ -33,10 +34,11 @@ static constexpr decltype({{ node.name }}_output)::tensor_info_t<std::size_t> {{
const TensorView<{{ node.dtype.cpptype() }}, MemoryLayout::{{ node.transposed_dimension_format }}> {{ node.name }}_output(
reinterpret_cast<{{ node.dtype.cpptype() }}*>({{ node.name }}_raw),
{{ node.name }}_shape);

#elif defined USE_NEON || defined USE_AVX
-static Base<{{ node.dtype.cpptype() }}>::type {{ node.name }}_raw[] = {
+alignas(16) static unsigned char {{ node.name }}_raw[] = {
Reviewer comment (Contributor): Where does the 16-byte alignment come from?
{% for d in node.data.flatten() -%}
-{{- d -}},
+{{- pack_to_bytes(d) -}},
{%- endfor %}
};
static constexpr decltype({{ node.name }}_output)::tensor_info_t<std::size_t> {{ node.name }}_shape = {
@@ -47,10 +49,11 @@ static constexpr decltype({{ node.name }}_output)::tensor_info_t<std::size_t> {{
const TensorView<{{ node.dtype.cpptype() }}, MemoryLayout::{{ node.dimension }}> {{ node.name }}_output(
reinterpret_cast<{{ node.dtype.cpptype() }}*>({{ node.name }}_raw),
{{ node.name }}_shape);

#else
-static Base<{{ node.dtype.cpptype() }}>::type {{ node.name }}_raw[] = {
+alignas(16) static unsigned char {{ node.name }}_raw[] = {
{% for d in node.kn2row_data -%}
-{{- d -}},
+{{- pack_to_bytes(d) -}},
{%- endfor %}
};
static constexpr decltype({{ node.name }}_output)::tensor_info_t<std::size_t> {{ node.name }}_shape = {
@@ -65,9 +68,9 @@ const TensorView<{{ node.dtype.cpptype() }}, MemoryLayout::{{ node.kn2row_dimens

{% else -%}

-static Base<{{ node.dtype.cpptype() }}>::type {{ node.name }}_raw[] = {
+alignas(16) static unsigned char {{ node.name }}_raw[] = {
{% for d in node.data.flatten() -%}
-{{- d -}},
+{{- pack_to_bytes(d) -}},
{%- endfor %}
};
static constexpr decltype({{ node.name }}_output)::tensor_info_t<std::size_t> {{ node.name }}_shape = {
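To illustrate what the template change above produces, here is a small stand-alone rendering sketch (not code from this repository; the node name and data are invented): the rendered constant becomes a byte-array initializer instead of a list of floating point literals, and the generated C++ later reinterpret_casts it back to the node's dtype.

import numpy as np
from jinja2 import Environment
from blueoil.converter.template import pack_to_bytes

env = Environment()
env.globals['pack_to_bytes'] = pack_to_bytes

# Toy stand-in for the consts/input.tpl.cpp pattern.
toy = env.from_string(
    "alignas(16) static unsigned char {{ name }}_raw[] = {"
    "{% for d in data %}{{ pack_to_bytes(d) }},{% endfor %}};"
)
data = np.array([1.0, 0.5], dtype=np.float32)
print(toy.render(name="conv1_kernel", data=data.flatten()))
# alignas(16) static unsigned char conv1_kernel_raw[] = {0,0,128,63,0,0,0,63,};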
@@ -22,16 +22,20 @@ namespace scaling_factors {

{% if conv.quantizer.op_type == 'BinaryMeanScalingQuantizer' -%}

-T_FLOAT {{ conv.name }} = {{ conv.quantizer.scaling_factor }};
+alignas(16) unsigned char {{ conv.name }}_raw[] = { {{ pack_to_bytes(conv.quantizer.scaling_factor) }} };
+
+T_FLOAT {{ conv.name }} = *(reinterpret_cast<float*>({{ conv.name }}_raw));

{% elif conv.quantizer.op_type == 'BinaryChannelWiseMeanScalingQuantizer' -%}

-T_FLOAT {{ conv.name }}[{{ conv.depth }}] = {
+alignas(16) unsigned char {{ conv.name }}_raw[] = {
{% for f in conv.quantizer.scaling_factor -%}
-{{- f -}},
+{{- pack_to_bytes(f) -}},
{%- endfor %}
};

+T_FLOAT* {{ conv.name }} = reinterpret_cast<float*>({{ conv.name }}_raw);

{% else -%}

OtherQuantizerScalingFactorNotImplemented
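A round-trip sketch of the property the scaling-factor change relies on (illustrative only; the scale value is invented): the bytes pack_to_bytes writes into the _raw array reinterpret back to exactly the original float32, which is what the generated reinterpret_cast to T_FLOAT assumes.

import numpy as np
from blueoil.converter.template import pack_to_bytes

scale = np.float32(0.0039215689)                      # made-up per-channel scaling factor
raw = bytes(int(b) for b in pack_to_bytes(scale).split(","))
back = np.frombuffer(raw, dtype="<f4")[0]
assert back == scale                                  # bit-exact, no decimal round-trip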
@@ -28,7 +28,7 @@ extern T_FLOAT {{ conv.name }};

{% elif conv.quantizer.op_type == 'BinaryChannelWiseMeanScalingQuantizer' -%}

-extern T_FLOAT {{ conv.name }}[{{ conv.depth }}];
+extern T_FLOAT* {{ conv.name }};

{% else -%}
