Movatterモバイル変換


[0]ホーム

URL:


Skip to content

Navigation Menu

Sign in
Appearance settings

Search code, repositories, users, issues, pull requests...

Provide feedback

We read every piece of feedback, and take your input very seriously.

Saved searches

Use saved searches to filter your results more quickly

Sign up
Appearance settings

Adds a mode to compare models without execution#76

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to ourterms of service andprivacy statement. We’ll occasionally send you account related emails.

Already on GitHub?Sign in to your account

Merged
sdpython merged 14 commits intomainfromcmd
Feb 21, 2024
Merged
Show file tree
Hide file tree
Changes from1 commit
Commits
Show all changes
14 commits
Select commitHold shift + click to select a range
6eb6adf
update requirements
xadupreFeb 2, 2024
4f0a994
Merge branch 'main' of https://github.com/sdpython/onnx-array-api
xadupreFeb 2, 2024
c7bb055
Merge branch 'main' of https://github.com/sdpython/onnx-array-api
xadupreFeb 2, 2024
a3d4ccf
Merge branch 'main' of https://github.com/sdpython/onnx-array-api
xadupreFeb 6, 2024
7ed1385
Merge branch 'main' of https://github.com/sdpython/onnx-array-api
xadupreFeb 6, 2024
bab2a6b
Merge branch 'main' of https://github.com/sdpython/onnx-array-api
xadupreFeb 7, 2024
014404b
Merge branch 'main' of https://github.com/sdpython/onnx-array-api
xadupreFeb 7, 2024
00e2a1c
Merge branch 'main' of https://github.com/sdpython/onnx-array-api
xadupreFeb 14, 2024
d1aff97
Merge branch 'main' of https://github.com/sdpython/onnx-array-api
xadupreFeb 15, 2024
271c29d
Add a mode to compare model without execution
xadupreFeb 21, 2024
918e0dd
changelogs
xadupreFeb 21, 2024
3b10ea8
improve initializer
xadupreFeb 21, 2024
463bc86
fix display
xadupreFeb 21, 2024
b627f81
fix side
xadupreFeb 21, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
PrevPrevious commit
NextNext commit
Add a mode to compare model without execution
  • Loading branch information
@xadupre
xadupre committedFeb 21, 2024
commit271c29d604a9bffdebc5890c27f7952e41bf2c8e
66 changes: 65 additions & 1 deletion_unittests/ut_reference/test_evaluator_yield.py
View file
Open in desktop
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
import unittest
import numpy as np
from onnx import TensorProto
from onnx.checker import check_model
from onnx.helper import (
make_function,
make_graph,
Expand All@@ -9,6 +10,7 @@
make_opsetid,
make_tensor_value_info,
)
from onnx.numpy_helper import from_array
from onnx.parser import parse_model
from onnx_array_api.ext_test_case import ExtTestCase
from onnx_array_api.reference import (
Expand DownExpand Up@@ -426,7 +428,7 @@ def test_distance_sequence_str(self):
002=|INPUTfloat322x2ABCDB|INPUTfloat322x2ABCDB
003~|INPUTfloat322x3ABCDX|INPUTfloat322x2ABCDX
004-|RESULTfloat322x2CEIOExpH|
005=|RESULTfloat322x2CEIOLinearRegrY1|RESULTfloat322x2CEIOLinearRegrY1
005=|RESULTfloat322x2CEIOLinearRegresY1|RESULTfloat322x2CEIOLinearRegresY1
006~|RESULTfloat322x2CEIOAbsY|RESULTfloat322x3CEIPAbsZ
007~|OUTPUTfloat322x2CEIOY|OUTPUTfloat322x2CEIPY
""".replace(
Expand DownExpand Up@@ -460,6 +462,68 @@ def test_compare_execution(self):
self.assertIn("CAAA Constant", text)
self.assertEqual(len(align), 5)

def test_no_execution(self):
    """Compare two models node by node (mode='nodes'), without executing them."""

    def _mkmodel(with_cast):
        # The two variants share everything except the tail: with_cast inserts
        # a Cast before MatMul, otherwise MatMul consumes xm2c directly.
        if with_cast:
            tail = [
                make_node("Cast", ["xm2c"], ["xm2"], to=1),
                make_node("MatMul", ["xm1", "xm2"], ["xm"]),
            ]
        else:
            tail = [make_node("MatMul", ["xm1", "xm2c"], ["xm"])]
        nodes = [
            make_node("Unsqueeze", ["X", "zero"], ["xu1"]),
            make_node("Unsqueeze", ["xu1", "un"], ["xu2"]),
            make_node("Reshape", ["xu2", "shape1"], ["xm1"]),
            make_node("Reshape", ["Y", "shape2"], ["xm2c"]),
            *tail,
            make_node("Reshape", ["xm", "shape3"], ["Z"]),
        ]
        inits = [
            from_array(np.array(value, dtype=np.int64), name=name)
            for name, value in (
                ("zero", [0]),
                ("un", [1]),
                ("shape1", [1, 32, 128]),
                ("shape2", [15, 128, 64]),
                ("shape3", [3, 5, 32, 64]),
            )
        ]
        return make_model(
            make_graph(
                nodes,
                "dummy",
                [
                    make_tensor_value_info("X", TensorProto.FLOAT, [32, 128]),
                    make_tensor_value_info("Y", TensorProto.FLOAT, [3, 5, 128, 64]),
                ],
                [make_tensor_value_info("Z", TensorProto.FLOAT, [3, 5, 32, "N"])],
                inits,
            )
        )

    model = _mkmodel(True)
    check_model(model)
    res1, res2, align, dc = compare_onnx_execution(model, model, mode="nodes")
    text = dc.to_str(res1, res2, align)
    self.assertIn("012 = | NODE", text)

    model2 = _mkmodel(False)
    check_model(model2)
    res1, res2, align, dc = compare_onnx_execution(model, model2, mode="nodes")
    text = dc.to_str(res1, res2, align)
    self.assertIn("012 = | NODE", text)


if __name__ == "__main__":
unittest.main(verbosity=2)
15 changes: 12 additions & 3 deletionsonnx_array_api/_command_lines_parser.py
View file
Open in desktop
Original file line numberDiff line numberDiff line change
Expand Up@@ -20,7 +20,7 @@ def get_main_parser() -> ArgumentParser:
Selects a command.

'translate' exports an onnx graph into a piece of code replicating it,
'compares' compares the execution of two onnx models
'compare' compares the execution of two onnx models
"""
),
)
Expand DownExpand Up@@ -90,6 +90,13 @@ def get_parser_compare() -> ArgumentParser:
required=True,
help="second onnx model",
)
parser.add_argument(
"-m",
"--mode",
choices=["execute", "nodes"],
default="execute",
help="compare the execution ('execute') or the nodes only ('nodes')",
)
parser.add_argument(
"-v",
"--verbose",
Expand All@@ -112,7 +119,9 @@ def _cmd_compare(argv: List[Any]):
args = parser.parse_args(argv[1:])
onx1 = onnx.load(args.model1)
onx2 = onnx.load(args.model2)
res1, res2, align, dc = compare_onnx_execution(onx1, onx2, verbose=args.verbose)
res1, res2, align, dc = compare_onnx_execution(
onx1, onx2, verbose=args.verbose, mode=args.mode
)
text = dc.to_str(res1, res2, align, column_size=args.column_size)
print(text)

Expand All@@ -127,7 +136,7 @@ def main(argv: Optional[List[Any]] = None):
parser = get_main_parser()
parser.parse_args(argv)
else:
parsers = dict(translate=get_parser_translate)
parsers = dict(translate=get_parser_translate, compare=get_parser_compare)
cmd = argv[0]
if cmd not in parsers:
raise ValueError(
Expand Down
169 changes: 142 additions & 27 deletionsonnx_array_api/reference/evaluator_yield.py
View file
Open in desktop
Original file line numberDiff line numberDiff line change
Expand Up@@ -2,7 +2,9 @@
from typing import Any, Dict, List, Iterator, Optional, Tuple, Union
from enum import IntEnum
import numpy as np
from onnx import ModelProto, TensorProto, ValueInfoProto
from onnx import ModelProto, TensorProto, ValueInfoProto, load
from onnx.helper import tensor_dtype_to_np_dtype
from onnx.shape_inference import infer_shapes
from .evaluator import ExtendedReferenceEvaluator


Expand All@@ -20,6 +22,7 @@ class ResultType(IntEnum):
SPARSE_INITIALIZER = 4
INPUT = 8
OUTPUT = 16
NODE = 32

def __repr__(self):
return f"{self.__class__.__name__}.{self._name_}"
Expand DownExpand Up@@ -57,12 +60,13 @@ def __getitem__(self, i: int) -> Any:
raise IndexError(f"i={i} out of boundary")

def __str__(self):
dtype = self.dtype if self.dtype != 0 else ""
els = [
_align(self.kind._name_, 6),
_align(str(self.dtype).replace("dtype(", "").replace(")", ""), 8),
_align("x".join(map(str, self.shape)), 15),
_align(str(dtype).replace("dtype(", "").replace(")", ""), 8),
_align("x".join("" if self.shape is None elsemap(str, self.shape)), 15),
self.summary,
_align(self.op_type or "",10),
_align(self.op_type or "",12),
self.name or "",
]
return " ".join(els)
Expand DownExpand Up@@ -270,6 +274,22 @@ def _cost_type(self, t1: "np.dtype", t2: "np.dtype") -> float:
return 1

def _cost_shape(self, s1: Tuple[int, ...], s2: Tuple[int, ...]) -> float:
if s1 is None or s2 is None:
return self.rank_cost
if any(map(lambda s: isinstance(s, str), s1)) or any(
map(lambda s: isinstance(s, str), s2)
):
# dynamic shapes
if len(s1) != len(s2):
return self.rank_cost
d = 0
for i, j in zip(s1, s2):
if isinstance(i, int) and isinstance(j, int):
d += abs(i - j)
elif i != j:
d += self.rank_cost / 2
return d

d = abs(np.prod(s1) - np.prod(s2))
if len(s1) != len(s2):
return self.rank_cost + d
Expand DownExpand Up@@ -424,12 +444,85 @@ def generate_inputs(model: ModelProto) -> List[np.ndarray]:
return inputs


def _tensor_shape(val):
    """Return the shape of a ValueInfoProto as a tuple, keeping symbolic
    (string) dimension parameters when a dimension has no fixed value."""
    return tuple(
        d.dim_param if d.dim_param else d.dim_value
        for d in val.type.tensor_type.shape.dim
    )


def _update_shape_types_with_proto(
    proto: ModelProto,
) -> Dict[str, Tuple[int, Tuple[Union[int, str], ...]]]:
    """
    Retrieves the element types and shapes for every named result of a model.

    :param proto: model to inspect
    :return: dictionary mapping a result name to a pair
        ``(element_type, shape)``; dimensions may be integers or symbolic
        (string) dimension parameters

    Declared inputs, outputs and initializers are read first, then
    shape inference fills in the intermediate results.
    """
    assert isinstance(proto, ModelProto), f"Unexpected type {type(proto)} for proto"
    res: Dict[str, Tuple[int, Tuple[Union[int, str], ...]]] = {}

    for val in proto.graph.input:
        res[val.name] = (val.type.tensor_type.elem_type, _tensor_shape(val))

    for val in proto.graph.output:
        res[val.name] = (val.type.tensor_type.elem_type, _tensor_shape(val))

    for init in proto.graph.initializer:
        res[init.name] = (init.data_type, tuple(init.dims))

    # Shape inference recovers types/shapes of intermediate results
    # (value_info is usually empty on the original proto).
    for val in infer_shapes(proto).graph.value_info:
        res[val.name] = (val.type.tensor_type.elem_type, _tensor_shape(val))

    return res


def _enumerate_result_no_execution(model: ModelProto) -> Iterator[ResultExecution]:
    """
    Produces a list of results based on a model in order to
    trigger the edit distance comparison.

    :param model: model to inspect
    :return: iterator on :class:`ResultExecution`

    No execution happens: types and shapes come from the proto and from
    shape inference (``_update_shape_types_with_proto``); the summary
    column is filled with the placeholder ``"????"``.
    """

    def _np_dtype(itype):
        # itype == 0 (UNDEFINED) is the fallback when a result is unknown
        # to shape inference; tensor_dtype_to_np_dtype has no mapping for
        # it, so keep 0, which the display code treats as "no dtype".
        return tensor_dtype_to_np_dtype(itype) if itype else 0

    type_shape = _update_shape_types_with_proto(model)
    for init in model.graph.initializer:
        itype, shape = type_shape.get(init.name, (0, None))
        yield ResultExecution(
            ResultType.INITIALIZER, _np_dtype(itype), shape, "????", "INIT", init.name
        )
    for inp in model.graph.input:
        itype, shape = type_shape.get(inp.name, (0, None))
        yield ResultExecution(
            ResultType.INPUT, _np_dtype(itype), shape, "????", "INPUT", inp.name
        )
    for node in model.graph.node:
        # The node itself becomes a result so the edit distance can align
        # the two graphs node by node, then every node output follows.
        yield ResultExecution(ResultType.NODE, 0, None, "????", node.op_type, node.name)
        for o in node.output:
            itype, shape = type_shape.get(o, (0, None))
            yield ResultExecution(
                ResultType.RESULT, _np_dtype(itype), shape, "????", node.op_type, o
            )
    for out in model.graph.output:
        itype, shape = type_shape.get(out.name, (0, None))
        yield ResultExecution(
            ResultType.OUTPUT, _np_dtype(itype), shape, "????", "OUTPUT", out.name
        )


def compare_onnx_execution(
model1: ModelProto,
model2: ModelProto,
inputs: Optional[Union[List[Any], Tuple[Dict[str, Any]]]] = None,
verbose: int = 0,
raise_exc: bool = True,
mode: str = "execute",
) -> Tuple[List[ResultExecution], List[ResultExecution], List[Tuple[int, int]]]:
"""
Compares the execution of two onnx models.
Expand All@@ -443,33 +536,55 @@ def compare_onnx_execution(
the same number of inputs or two dictionaries, one for each model
:param verbose: verbosity
:param raise_exc: raise exception if the execution fails or stop at the error
:param mode: if ``'execute'``, both models are executed and their intermediate
    results are compared; if ``'nodes'``, no execution happens and the
    comparison relies on the nodes only
:return: four results, a sequence of results for the first model and the second model,
the alignment between the two, DistanceExecution
"""
if verbose:
print("[compare_onnx_execution] generate inputs")
if inputs is None:
inputs = generate_inputs(model1)
if isinstance(inputs, tuple):
assert len(inputs) == 2, f"Unexpected number {len(inputs)} of inputs."
feeds1, feeds2 = inputs
assert mode in {"execute", "nodes"}, f"Unexpected value for mode={mode!r}."

if mode == "execute":
if inputs is None:
if verbose:
print("[compare_onnx_execution] generate inputs")
inputs = generate_inputs(model1)
if isinstance(inputs, tuple):
assert len(inputs) == 2, f"Unexpected number {len(inputs)} of inputs."
feeds1, feeds2 = inputs
else:
feeds1 = {i.name: v for i, v in zip(model1.graph.input, inputs)}
feeds2 = {i.name: v for i, v in zip(model2.graph.input, inputs)}
assert isinstance(feeds1, dict), f"Unexpected type {type(feeds1)} for inputs"
assert isinstance(feeds2, dict), f"Unexpected type {type(feeds2)} for inputs"
if verbose:
print(f"[compare_onnx_execution] execute with {len(inputs)} inputs")
print("[compare_onnx_execution] execute first model")
res1 = list(
YieldEvaluator(model1).enumerate_summarized(
None, feeds1, raise_exc=raise_exc
)
)
if verbose:
print(f"[compare_onnx_execution] got {len(res1)} results")
print("[compare_onnx_execution] execute second model")
res2 = list(
YieldEvaluator(model2).enumerate_summarized(
None, feeds2, raise_exc=raise_exc
)
)
elif mode == "nodes":
# No execution.
if verbose:
print("[compare_onnx_execution] loading first model")
proto1 = load(model1) if isinstance(model1, str) else model2
if verbose:
print("[compare_onnx_execution] loading first model")
proto2 = load(model2) if isinstance(model2, str) else model1
res1 = list(_enumerate_result_no_execution(proto1))
res2 = list(_enumerate_result_no_execution(proto2))
else:
feeds1 = {i.name: v for i, v in zip(model1.graph.input, inputs)}
feeds2 = {i.name: v for i, v in zip(model2.graph.input, inputs)}
assert isinstance(feeds1, dict), f"Unexpected type {type(feeds1)} for inputs"
assert isinstance(feeds2, dict), f"Unexpected type {type(feeds2)} for inputs"
if verbose:
print(f"[compare_onnx_execution] got {len(inputs)} inputs")
print("[compare_onnx_execution] execute first model")
res1 = list(
YieldEvaluator(model1).enumerate_summarized(None, feeds1, raise_exc=raise_exc)
)
if verbose:
print(f"[compare_onnx_execution] got {len(res1)} results")
print("[compare_onnx_execution] execute second model")
res2 = list(
YieldEvaluator(model2).enumerate_summarized(None, feeds2, raise_exc=raise_exc)
)
return

if verbose:
print(f"[compare_onnx_execution] got {len(res2)} results")
print("[compare_onnx_execution] compute edit distance")
Expand Down

[8]ページ先頭

©2009-2025 Movatter.jp