Skip to content

Re arch rc1 mobilenet fp1 #201

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 14 commits into
base: develop
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,7 @@ sd-driver
#Project generated test files
python/test_scripts/testData
python/.venv
__pycache__

#Vim stuff
*.swp
Expand Down
32 changes: 32 additions & 0 deletions TESTS/tensors/test_romtensor.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -125,3 +125,35 @@ TEST(Rom_Tensor, read_write_i16) {
cout << "uint16 Sizeof IntegralValue " << sizeof(IntegralValue(5)) << endl;
delete[] buffer;
}

// A ScalarRomTensor over a single int8 value should yield that value on read.
TEST(ScalarRom_Tensor, read_write_i8) {
  // Fresh arenas so metadata/RAM allocations are isolated to this test.
  localCircularArenaAllocator<256> meta_allocator;
  localCircularArenaAllocator<256> ram_allocator;
  Context::get_default_context()->set_metadata_allocator(&meta_allocator);
  Context::get_default_context()->set_ram_data_allocator(&ram_allocator);

  int8_t* rom_data = new int8_t[1];
  rom_data[0] = 5;
  ScalarRomTensor r({1}, i8, rom_data);
  // The test expects the stored scalar back even for the (2,2) index pair.
  const int8_t value = r(2, 2);
  EXPECT_EQ(value, 5);
  cout << "i8 Sizeof IntegralValue " << sizeof(IntegralValue(5)) << endl;
  cout << "Sizeof RomTensor " << sizeof(r) << endl;
  delete[] rom_data;
}

// A ScalarRomTensor over a single float value should yield that value on read.
TEST(ScalarRom_Tensor, read_write_flt) {
  // Fresh arenas so metadata/RAM allocations are isolated to this test.
  localCircularArenaAllocator<256> meta_allocator;
  localCircularArenaAllocator<256> ram_allocator;
  Context::get_default_context()->set_metadata_allocator(&meta_allocator);
  Context::get_default_context()->set_ram_data_allocator(&ram_allocator);

  float* rom_data = new float[1];
  rom_data[0] = 5.0;
  ScalarRomTensor r({1}, flt, rom_data);
  // The test expects the stored scalar back even for the (2,2) index pair.
  const float value = r(2, 2);
  EXPECT_NEAR(value, 5.0, 0.0001);
  cout << "float Sizeof IntegralValue " << sizeof(IntegralValue(5)) << endl;
  cout << "Sizeof RomTensor " << sizeof(r) << endl;
  delete[] rom_data;
}
Empty file added python/test_scripts/__init__.py
Empty file.
24 changes: 24 additions & 0 deletions python/test_scripts/gen_softmax.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
from jinja_env import env2, Operator, Tensor, SingleOpTest
import tensorflow as tf

test_group = "Softmax"


def gen_test(test_number, scale=1.0, seed=None):
    """Render one generated Softmax regression test.

    Builds a random (1, 10) float input scaled by ``scale``, computes the
    TensorFlow softmax as the reference output, and renders the C++ test
    body via the jinja_env templates.

    Args:
        test_number: integer suffix used to make test and ref names unique.
        scale: multiplier applied to the uniform random input.
        seed: optional RNG seed; when given, makes the generated data
            reproducible across runs (previously generation was unseeded).

    Returns:
        (test_rendered, const_snippets): the rendered test body and the ROM
        constant definitions it references. The constants were previously
        rendered but discarded, leaving generated tests referring to
        undefined symbols — callers should emit both.
    """
    if seed is not None:
        tf.random.set_seed(seed)
    test_name = "random_gen_scale_%d__%d" % (int(scale), test_number)
    in1 = tf.constant(tf.random.uniform([1, 10]) * scale).numpy()
    out_1 = tf.nn.softmax(in1).numpy()

    in_ref_name = "s_ref_in_%d" % test_number
    out_ref_name = "s_ref_out_%d" % test_number
    in_t = Tensor("in", in1, ref_name=in_ref_name)
    # Reference tensor holds the expected output values in ROM.
    out_ref = Tensor("out_ref", out_1, ref_name=out_ref_name)
    out_t = Tensor("out", out_1)
    op = Operator("SoftmaxOperator", "softmaxOp", dtypes=["float"])
    op.set_inputs({"in": in_t}).set_outputs({"out": out_t})

    test = SingleOpTest(test_group, test_name, op)
    test.add_tensor_comparison(out_t, out_ref)
    test_rendered, const_snippets = test.render()
    print(test_rendered)
    return test_rendered, const_snippets

# NOTE(review): nothing in this script invokes gen_test, so running it as a
# script produces no output — confirm whether a __main__ entry point or an
# external driver is intended.
174 changes: 164 additions & 10 deletions python/test_scripts/jinja_env/__init__.py
Original file line number Diff line number Diff line change
@@ -1,23 +1,177 @@
import jinja2
from pathlib import Path
import numpy as np

_template_dir = Path(__file__).parent / "templates"
_template2_dir = Path(__file__).parent / "templates_v2"

env = jinja2.Environment(
loader=jinja2.FileSystemLoader(_template_dir), trim_blocks=True, lstrip_blocks=True
)
)
env.globals.update(
zip=zip,
len=len,
TENSOR_TYPE_MAP={
"int8_t": "i8",
"uint8_t": "u8",
"int16_t": "i16",
"uint16_t": "u16",
"int32_t": "i32",
"uint32_t": "u32",
"float": "flt",
},
)
"int8_t": "i8",
"uint8_t": "u8",
"int16_t": "i16",
"uint16_t": "u16",
"int32_t": "i32",
"uint32_t": "u32",
"float": "flt",
},
)

del _template_dir

env2 = jinja2.Environment(
loader=jinja2.FileSystemLoader(_template2_dir), trim_blocks=True, lstrip_blocks=True
)
env2.globals.update(
zip=zip,
len=len,
TENSOR_TYPE_MAP={
"int8_t": "i8",
"uint8_t": "u8",
"int16_t": "i16",
"uint16_t": "u16",
"int32_t": "i32",
"uint32_t": "u32",
"float": "flt",
},
NUMPY_2_CMAP={
np.int8: "int8_t",
np.uint8: "uint8_t",
np.int16: "int16_t",
np.uint16: "uint16_t",
np.int32: "int32_t",
np.uint32: "uint32_t",
np.float: "float",
np.dtype('float32'): "float",
},
)

TENSOR_TYPE_MAP={
"int8_t": "i8",
"uint8_t": "u8",
"int16_t": "i16",
"uint16_t": "u16",
"int32_t": "i32",
"uint32_t": "u32",
"float": "flt",
}
NUMPY_2_CMAP={
np.int8: "int8_t",
np.uint8: "uint8_t",
np.int16: "int16_t",
np.uint16: "uint16_t",
np.int32: "int32_t",
np.uint32: "uint32_t",
np.float: "float",
np.dtype('float32'): "float",
}

class Tensor:
    """A named numpy-backed tensor used to render C++ test declarations.

    Attributes:
        name: C++ variable name used in the generated test.
        np_array: numpy array holding the tensor's values.
        ref_name: name of the generated ROM constant array; when set the
            tensor renders as a RomTensor, otherwise as a RamTensor.
        quantize_params: optional two-element list consumed by the
            declaration templates (they pass params[1] then params[0] to
            PerTensorQuantizationParams — confirm ordering there).
    """

    def __init__(self, name, np_array, ref_name=None, quantize_params=None):
        self.name = name
        self.np_array = np_array
        self.ref_name = ref_name
        # Fix: the previous `quantize_params=[]` default shared one mutable
        # list across every instance; give each instance its own list.
        self.quantize_params = [] if quantize_params is None else quantize_params

    @property
    def shape(self):
        # Tuple of dimension sizes, straight from numpy.
        return self.np_array.shape

    @property
    def dtype(self):
        # C type name (e.g. "float", "int8_t") for this array's dtype.
        return NUMPY_2_CMAP[self.np_array.dtype]

    @property
    def utype(self):
        # uTensor type tag (e.g. "flt", "i8") matching self.dtype.
        return TENSOR_TYPE_MAP[self.dtype]

    def flatten(self):
        # 1-D copy of the values, used when emitting constant initializers.
        return self.np_array.flatten()

    def render_constant(self):
        """Render the ROM constant definition, or "" for RAM tensors."""
        if self.ref_name:
            return env2.get_template('def_constant.hpp').render(tensor=self)
        return ""

    def render_declaration(self):
        """Render the declaration: RomTensor if ref_name is set, else RamTensor."""
        if self.ref_name:
            return env2.get_template('declare_rom_tensor.cpp').render(tensor=self)
        return env2.get_template('declare_ram_tensor.cpp').render(tensor=self)


class Operator:
    """One operator instantiation in a generated test.

    Renders its C++ type signature eagerly via the op_type_signature
    template and carries input/output tensor maps keyed by the
    operator's port names.
    """

    def __init__(self, op_type, name, dtypes=None, param_str=None):
        self.op_type = op_type      # C++ operator class name
        self.name = name            # C++ variable name for the instance
        # Fix: the previous `dtypes=[]` default shared one mutable list
        # across instances; copy into a fresh per-instance list instead.
        self.dtypes = list(dtypes) if dtypes else []
        self.param_str = param_str  # optional constructor argument string
        self.array_template = env2.get_template('array_template.cpp')
        self.input_map = {}
        self.output_map = {}
        # e.g. "SoftmaxOperator<float>" — rendered once, reused everywhere.
        self.type_signature = env2.get_template('op_type_signature.cpp').render(op=self)

    def set_inputs(self, input_map):
        """Set the {port_name: Tensor} input map; returns self for chaining."""
        self.input_map = input_map
        return self

    def set_outputs(self, output_map):
        """Set the {port_name: Tensor} output map; returns self for chaining."""
        self.output_map = output_map
        return self

    def render_declaration(self):
        # C++ declaration line for this operator instance.
        return env2.get_template('declare_operator.cpp').render(op=self)

    def render_eval(self):
        # C++ set_inputs/set_outputs/eval() invocation.
        return env2.get_template('eval_operator.cpp').render(op=self)

class SingleOpTest:
    """Aggregates one operator and its tensors into a rendered C++ test case."""

    def __init__(self, test_group, test_name, target_op):
        self.test_group = test_group
        self.test_name = test_name
        self.target_op = target_op
        self.compare_tensors = []
        # Total number of output elements; sizes the generated RAM arena.
        self.out_size = sum(
            len(t.flatten()) for t in target_op.output_map.values()
        )
        # Every tensor the rendered test must declare (inputs and outputs).
        self.tensor_set = set(target_op.input_map.values()) | set(
            target_op.output_map.values()
        )

    def add_tensor_comparison(self, a, b):
        """Register an element-wise comparison between tensors a and b."""
        self.compare_tensors.append((a, b))
        self.tensor_set.update((a, b))

    def render(self):
        """Render the full test body; returns (test_rendered, const_snippets)."""
        const_snippets = [t.render_constant() for t in self.tensor_set]
        tensor_decls = [t.render_declaration() for t in self.tensor_set]
        op_decl = self.target_op.render_declaration()
        op_eval = self.target_op.render_eval()

        compare_snippets = [
            env2.get_template('compare_outputs.cpp').render(a=a, b=b)
            for a, b in self.compare_tensors
        ]

        container = env2.get_template('test_container.cpp')
        rendered = container.render(
            test_group=self.test_group,
            test_name=self.test_name,
            out_size=self.out_size,
            tensor_declarations=tensor_decls,
            op_decl=op_decl,
            op_eval=op_eval,
            compare_snippets=compare_snippets,
        )
        return (rendered, const_snippets)


del _template2_dir
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
{% for x in arr %}{{ x }}{{ "," if not loop.last }}{% endfor %}
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
{# Emits an element-wise EXPECT_NEAR loop comparing tensor a against b.
   Loop bound comes from b's flattened length; each operand is cast to its
   own declared C type before comparison. Stripped from output at render
   time (jinja comment, trim_blocks enabled). #}
for(int i = 0; i < {{ len(b.flatten()) }}; i++) {
EXPECT_NEAR(static_cast<{{ a.dtype }}>( {{ a.name }}(i) ), static_cast<{{ b.dtype }}>( {{ b.name }}(i) ), 0.0001);
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
#ifndef {{ constants_header | replace(".", "_") }}
#define {{ constants_header | replace(".", "_") }}
{% for constant_snippet in constants %}
{{ constant_snippet }}
{% endfor %}
#endif
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
{{ op.type_signature }} {{ op.name }}{% if op.param_str %}({{ op.param_str }}){% endif %};
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
{# Declares a RamTensor with the tensor's shape list and uTensor type tag.
   quantize_params is consumed as (params[1], params[0]) — presumably
   (zero_point, scale); confirm against PerTensorQuantizationParams. #}
Tensor {{tensor.name}} = new RamTensor({ {%for s in tensor.shape%}{{ s }}{{"," if not loop.last}}{%endfor%} }, {{ tensor.utype }});
{%if tensor.quantize_params%}
{{tensor.name}}->set_quantization_params(PerTensorQuantizationParams({{tensor.quantize_params[1]}}, {{tensor.quantize_params[0]}}));
{%endif%}
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
{# Declares a RomTensor backed by the generated constant array tensor.ref_name.
   quantize_params is consumed as (params[1], params[0]) — presumably
   (zero_point, scale); confirm against PerTensorQuantizationParams. #}
Tensor {{ tensor.name }} = new RomTensor({ {% for s in tensor.shape %}{{ s }}{{"," if not loop.last}}{% endfor %} }, {{ tensor.utype }}, {{ tensor.ref_name }});
{%if tensor.quantize_params%}
{{tensor.name}}->set_quantization_params(PerTensorQuantizationParams({{tensor.quantize_params[1]}}, {{tensor.quantize_params[0]}}));
{%endif%}
3 changes: 3 additions & 0 deletions python/test_scripts/jinja_env/templates_v2/def_constant.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
{# Emits "static const <ctype> <ref_name>[N] = { ... };" holding the tensor's
   flattened values, inserting a line break after every 10th element. #}
static const {{ tensor.dtype }} {{ tensor.ref_name }}[{{ len(tensor.flatten()) }}] = {
{% for x in tensor.flatten() %} {{ x }}{{ "," if not loop.last }}{{ "\n" if not loop.first and loop.index % 10 == 0}} {% endfor %}
};
10 changes: 10 additions & 0 deletions python/test_scripts/jinja_env/templates_v2/eval_operator.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
{# Renders the chained set_inputs/set_outputs/eval() call for one operator.
   Map keys are the operator's named port identifiers (scoped to the op's
   type signature); values are the tensor variable names. #}
{{op.name}}
.set_inputs({
{% for x in op.input_map %}
{ {{op.type_signature}}::{{x}}, {{op.input_map[x].name}} }{{"," if not loop.last}}
{% endfor %}
}).set_outputs({
{% for x in op.output_map %}
{ {{op.type_signature}}::{{x}}, {{op.output_map[x].name}} }{{"," if not loop.last}}
{% endfor %}
}).eval();
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
{{ op.op_type }}{% if op.dtypes %}<{{ op.array_template.render(arr=op.dtypes) }}>{% endif %}
20 changes: 20 additions & 0 deletions python/test_scripts/jinja_env/templates_v2/test_container.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
{# test_container.cpp — top-level template for one generated gtest case.
   Context vars: test_group / test_name (TEST macro ids), out_size (output
   element count used to size the RAM arena at out_size*2*sizeof(float)),
   tensor_declarations, op_decl, op_eval, compare_snippets — all
   pre-rendered code fragments. #}
/***************************************
* Generated Test
***************************************/

TEST({{ test_group }}, {{ test_name }}) {
localCircularArenaAllocator<1024> meta_allocator;
localCircularArenaAllocator<{{ out_size }}*2*sizeof(float), uint32_t> ram_allocator;
Context::get_default_context()->set_metadata_allocator(&meta_allocator);
Context::get_default_context()->set_ram_data_allocator(&ram_allocator);

{% for tensor_decl in tensor_declarations %}
{{ tensor_decl }}{% endfor %}

{{ op_decl }}
{{ op_eval }}

{% for compare_snippet in compare_snippets %}
{{ compare_snippet }}
{% endfor %}
}
2 changes: 2 additions & 0 deletions src/uTensor/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,8 @@ set(src_utensor_tensors
)
set(src_utensor_ops
ops/Matrix.cpp
ops/Matrix_kernels.cpp
ops/Convolution_kernels.cpp
)
set(src_utensor_errhndl
errorHandlers/SimpleErrorHandler.cpp
Expand Down
8 changes: 6 additions & 2 deletions src/uTensor/core/quantizationPrimitives.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -83,11 +83,15 @@ void QuantizationParamsHandle::free() {
// Default handle: wraps nullptr; nothing is registered with the allocator.
QuantizationParamsHandle::QuantizationParamsHandle() : Handle() {}
// Wrap an existing QuantizationParams object. Only a non-null pointer is
// bound to the metadata allocator, so constructing a null handle no longer
// creates a spurious allocator binding.
QuantizationParamsHandle::QuantizationParamsHandle(QuantizationParams* ptr) : Handle((void*)ptr) {
// Context::get_default_context()->get_metadata_allocator()->bind(_ptr, this);
if(ptr){
bind(*this, *Context::get_default_context()->get_metadata_allocator());
}
}
// Rebind this handle to a new QuantizationParams pointer. Mirrors the
// pointer constructor: only a non-null target is bound to the metadata
// allocator.
// NOTE(review): if this handle was previously bound to a non-null target,
// assigning a new pointer (including nullptr) does not release the old
// binding here — confirm whether Handle/bind handles that, or an unbind
// is needed before reassignment.
QuantizationParamsHandle& QuantizationParamsHandle::operator=(QuantizationParams* ptr) {
_ptr = (void*)ptr;
if(ptr){
bind(*this, *Context::get_default_context()->get_metadata_allocator());
}
// Context::get_metadata_allocator()->bind(_ptr, this);
return *this;
}
Expand Down
Loading