Skip to content

Fix as_tensor in onnx_text_plot_tree #101

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 6 commits into from
Jul 21, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions CHANGELOGS.rst
Original file line number Diff line number Diff line change
@@ -1,6 +1,11 @@
Change Logs
===========

0.3.2
+++++

* :pr:`101`: fix as_tensor in onnx_text_plot_tree

0.3.1
+++++

Expand Down
20 changes: 18 additions & 2 deletions _unittests/ut_light_api/test_backend_export.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,11 @@
make_opsetid,
make_tensor_value_info,
)
from onnx.reference.op_run import to_array_extended

try:
from onnx.reference.op_run import to_array_extended
except ImportError:
from onnx.numpy_helper import to_array as to_array_extended
from onnx.numpy_helper import from_array, to_array
from onnx.backend.base import Device, DeviceType
from onnx_array_api.reference import ExtendedReferenceEvaluator
Expand Down Expand Up @@ -240,7 +244,19 @@ def run_node(cls, node, inputs, device=None, outputs_info=None, **kwargs):
raise NotImplementedError("Unable to run the model node by node.")


backend_test = onnx.backend.test.BackendTest(ExportBackend, __name__)
dft_atol = 1e-3 if sys.platform != "linux" else 1e-5
backend_test = onnx.backend.test.BackendTest(
ExportBackend,
__name__,
test_kwargs={
"test_dft": {"atol": dft_atol},
"test_dft_axis": {"atol": dft_atol},
"test_dft_axis_opset19": {"atol": dft_atol},
"test_dft_inverse": {"atol": dft_atol},
"test_dft_inverse_opset19": {"atol": dft_atol},
"test_dft_opset19": {"atol": dft_atol},
},
)

# The following tests are too slow with the reference implementation (Conv).
backend_test.exclude(
Expand Down
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
import os
import platform
import sys
import unittest
from typing import Any
import numpy
Expand Down Expand Up @@ -78,10 +79,21 @@ def run_node(cls, node, inputs, device=None, outputs_info=None, **kwargs):
raise NotImplementedError("Unable to run the model node by node.")


dft_atol = 1e-3 if sys.platform != "linux" else 1e-5
backend_test = onnx.backend.test.BackendTest(
ExtendedReferenceEvaluatorBackend, __name__
ExtendedReferenceEvaluatorBackend,
__name__,
test_kwargs={
"test_dft": {"atol": dft_atol},
"test_dft_axis": {"atol": dft_atol},
"test_dft_axis_opset19": {"atol": dft_atol},
"test_dft_inverse": {"atol": dft_atol},
"test_dft_inverse_opset19": {"atol": dft_atol},
"test_dft_opset19": {"atol": dft_atol},
},
)


if os.getenv("APPVEYOR"):
backend_test.exclude("(test_vgg19|test_zfnet)")
if platform.architecture()[0] == "32bit":
Expand Down
57 changes: 0 additions & 57 deletions azure-pipelines.yml
Original file line number Diff line number Diff line change
Expand Up @@ -93,63 +93,6 @@ jobs:
python -m pytest
displayName: 'Runs Unit Tests'

- job: 'TestLinuxArrayApi'
pool:
vmImage: 'ubuntu-latest'
strategy:
matrix:
Python310-Linux:
python.version: '3.10'
maxParallel: 3

steps:
- task: UsePythonVersion@0
inputs:
versionSpec: '$(python.version)'
architecture: 'x64'
- script: sudo apt-get update
displayName: 'AptGet Update'
- script: python -m pip install --upgrade pip setuptools wheel
displayName: 'Install tools'
- script: pip install -r requirements.txt
displayName: 'Install Requirements'
- script: pip install onnxruntime
displayName: 'Install onnxruntime'
- script: python setup.py install
displayName: 'Install onnx_array_api'
- script: |
git clone https://github.com/data-apis/array-api-tests.git
displayName: 'clone array-api-tests'
- script: |
cd array-api-tests
git submodule update --init --recursive
cd ..
displayName: 'get submodules for array-api-tests'
- script: pip install -r array-api-tests/requirements.txt
displayName: 'Install Requirements dev'
- script: |
export ARRAY_API_TESTS_MODULE=onnx_array_api.array_api.onnx_numpy
cd array-api-tests
displayName: 'Set API'
- script: |
python -m pip freeze
displayName: 'pip freeze'
- script: |
export ARRAY_API_TESTS_MODULE=onnx_array_api.array_api.onnx_numpy
cd array-api-tests
python -m pytest -x array_api_tests/test_creation_functions.py --skips-file=../_unittests/onnx-numpy-skips.txt --hypothesis-explain
displayName: "numpy test_creation_functions.py"
# - script: |
# export ARRAY_API_TESTS_MODULE=onnx_array_api.array_api.onnx_ort
# cd array-api-tests
# python -m pytest -x array_api_tests/test_creation_functions.py --skips-file=../_unittests/onnx-ort-skips.txt --hypothesis-explain
# displayName: "ort test_creation_functions.py"
#- script: |
# export ARRAY_API_TESTS_MODULE=onnx_array_api.array_api.onnx_numpy
# cd array-api-tests
# python -m pytest -x array_api_tests
# displayName: "all tests"

- job: 'TestLinux'
pool:
vmImage: 'ubuntu-latest'
Expand Down
2 changes: 1 addition & 1 deletion onnx_array_api/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,5 +2,5 @@
APIs to create ONNX Graphs.
"""

__version__ = "0.3.1"
__version__ = "0.3.2"
__author__ = "Xavier Dupré"
17 changes: 9 additions & 8 deletions onnx_array_api/plotting/text_plot.py
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,10 @@ def __init__(self, i, atts):
self.nodes_missing_value_tracks_true = None
for k, v in atts.items():
if k.startswith("nodes"):
setattr(self, k, v[i])
if k.endswith("_as_tensor"):
setattr(self, k.replace("_as_tensor", ""), v[i])
else:
setattr(self, k, v[i])
self.depth = 0
self.true_false = ""
self.targets = []
Expand Down Expand Up @@ -120,10 +123,7 @@ def process_tree(atts, treeid):
]
for k, v in atts.items():
if k.startswith(prefix):
if "classlabels" in k:
short[k] = list(v)
else:
short[k] = [v[i] for i in idx]
short[k] = list(v) if "classlabels" in k else [v[i] for i in idx]

nodes = OrderedDict()
for i in range(len(short["nodes_treeids"])):
Expand All @@ -132,9 +132,10 @@ def process_tree(atts, treeid):
for i in range(len(short[f"{prefix}_treeids"])):
idn = short[f"{prefix}_nodeids"][i]
node = nodes[idn]
node.append_target(
tid=short[f"{prefix}_ids"][i], weight=short[f"{prefix}_weights"][i]
)
key = f"{prefix}_weights"
if key not in short:
key = f"{prefix}_weights_as_tensor"
node.append_target(tid=short[f"{prefix}_ids"][i], weight=short[key][i])

def iterate(nodes, node, depth=0, true_false=""):
node.depth = depth
Expand Down
2 changes: 1 addition & 1 deletion onnx_array_api/profiling.py
Original file line number Diff line number Diff line change
Expand Up @@ -438,7 +438,7 @@ def add_rows(rows, d):
if verbose and fLOG is not None:
fLOG(
"[pstats] %s=%r"
% ((clean_text(k[0].replace("\\", "/")),) + k[1:], v)
% ((clean_text(k[0].replace("\\", "/")), *k[1:]), v)
)
if len(v) < 5:
continue
Expand Down
25 changes: 17 additions & 8 deletions onnx_array_api/reference/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,14 +2,21 @@
import numpy as np
from onnx import TensorProto
from onnx.numpy_helper import from_array as onnx_from_array
from onnx.reference.ops.op_cast import (
bfloat16,
float8e4m3fn,
float8e4m3fnuz,
float8e5m2,
float8e5m2fnuz,
)
from onnx.reference.op_run import to_array_extended

try:
from onnx.reference.ops.op_cast import (
bfloat16,
float8e4m3fn,
float8e4m3fnuz,
float8e5m2,
float8e5m2fnuz,
)
except ImportError:
bfloat16 = None
try:
from onnx.reference.op_run import to_array_extended
except ImportError:
from onnx.numpy_helper import to_array as to_array_extended
from .evaluator import ExtendedReferenceEvaluator
from .evaluator_yield import (
DistanceExecution,
Expand All @@ -28,6 +35,8 @@ def from_array_extended(tensor: np.array, name: Optional[str] = None) -> TensorP
:param name: name
:return: TensorProto
"""
if bfloat16 is None:
return onnx_from_array(tensor, name)
dt = tensor.dtype
if dt == float8e4m3fn and dt.descr[0][0] == "e4m3fn":
to = TensorProto.FLOAT8E4M3FN
Expand Down
22 changes: 14 additions & 8 deletions onnx_array_api/reference/ops/op_cast_like.py
Original file line number Diff line number Diff line change
@@ -1,17 +1,23 @@
from onnx.helper import np_dtype_to_tensor_dtype
from onnx.onnx_pb import TensorProto
from onnx.reference.op_run import OpRun
from onnx.reference.ops.op_cast import (
bfloat16,
cast_to,
float8e4m3fn,
float8e4m3fnuz,
float8e5m2,
float8e5m2fnuz,
)
from onnx.reference.ops.op_cast import cast_to

try:
from onnx.reference.ops.op_cast import (
bfloat16,
float8e4m3fn,
float8e4m3fnuz,
float8e5m2,
float8e5m2fnuz,
)
except ImportError:
bfloat16 = None


def _cast_like(x, y, saturate):
if bfloat16 is None:
return (cast_to(x, y.dtype, saturate),)
if y.dtype == bfloat16 and y.dtype.descr[0][0] == "bfloat16":
# np.uint16 == np.uint16 is True as well as np.uint16 == bfloat16
to = TensorProto.BFLOAT16
Expand Down