Better error messages #73


Merged
sdpython merged 12 commits into main from imp on Feb 7, 2024
1 change: 1 addition & 0 deletions in _unittests/onnx-numpy-skips.txt
@@ -4,6 +4,7 @@
array_api_tests/test_creation_functions.py::test_asarray_arrays
array_api_tests/test_creation_functions.py::test_empty
array_api_tests/test_creation_functions.py::test_empty_like
+array_api_tests/test_creation_functions.py::test_eye
# fails due to a precision issue
array_api_tests/test_creation_functions.py::test_linspace
array_api_tests/test_creation_functions.py::test_meshgrid
26 changes: 1 addition & 25 deletions in _unittests/ut_reference/test_array_tensor.py
@@ -1,13 +1,7 @@
import unittest
import numpy as np
from onnx import TensorProto
-from onnx.helper import (
-make_graph,
-make_model,
-make_node,
-make_tensor_value_info,
-make_opsetid,
-)
+from onnx.helper import make_graph, make_model, make_node, make_tensor_value_info
from onnx_array_api.ext_test_case import ExtTestCase
from onnx_array_api.reference import (
to_array_extended,
@@ -57,24 +51,6 @@ def make_model_f8(fr, to):
back = from_array_extended(got, "a")
self.assertEqual(to, back.data_type)

-def test_fused_matmul(self):
-model = make_model(
-make_graph(
-[make_node("FusedMatMul", ["X", "Y"], ["Z"], domain="com.microsoft")],
-"name",
-[
-make_tensor_value_info("X", TensorProto.FLOAT, None),
-make_tensor_value_info("Y", TensorProto.FLOAT, None),
-],
-[make_tensor_value_info("Z", TensorProto.FLOAT, None)],
-),
-opset_imports=[make_opsetid("", 18), make_opsetid("com.microsoft", 1)],
-)
-ref = ExtendedReferenceEvaluator(model)
-a = np.arange(4).reshape(-1, 2)
-got = ref.run(None, {"X": a, "Y": a})
-self.assertEqualArray(a @ a, got[0])
-

if __name__ == "__main__":
unittest.main(verbosity=2)
64 changes: 64 additions & 0 deletions in _unittests/ut_reference/test_reference_ops.py
@@ -0,0 +1,64 @@
import unittest
import numpy as np
from onnx import TensorProto
from onnx.helper import (
make_graph,
make_model,
make_node,
make_tensor_value_info,
make_opsetid,
)
from onnx_array_api.ext_test_case import ExtTestCase
from onnx_array_api.reference import ExtendedReferenceEvaluator


class TestReferenceOps(ExtTestCase):

def test_fused_matmul(self):
model = make_model(
make_graph(
[make_node("FusedMatMul", ["X", "Y"], ["Z"], domain="com.microsoft")],
"name",
[
make_tensor_value_info("X", TensorProto.FLOAT, None),
make_tensor_value_info("Y", TensorProto.FLOAT, None),
],
[make_tensor_value_info("Z", TensorProto.FLOAT, None)],
),
opset_imports=[make_opsetid("", 18), make_opsetid("com.microsoft", 1)],
)
ref = ExtendedReferenceEvaluator(model)
a = np.arange(4).reshape(-1, 2)
got = ref.run(None, {"X": a, "Y": a})
self.assertEqualArray(a @ a, got[0])

def test_fused_matmul11(self):
model = make_model(
make_graph(
[
make_node(
"FusedMatMul",
["X", "Y"],
["Z"],
transA=1,
transB=1,
domain="com.microsoft",
)
],
"name",
[
make_tensor_value_info("X", TensorProto.FLOAT, None),
make_tensor_value_info("Y", TensorProto.FLOAT, None),
],
[make_tensor_value_info("Z", TensorProto.FLOAT, None)],
),
opset_imports=[make_opsetid("", 18), make_opsetid("com.microsoft", 1)],
)
ref = ExtendedReferenceEvaluator(model)
a = np.arange(4).reshape(-1, 2)
got = ref.run(None, {"X": a, "Y": a})
self.assertEqualArray(a.T @ a.T, got[0])


if __name__ == "__main__":
unittest.main(verbosity=2)
3 changes: 3 additions & 0 deletions in onnx_array_api/reference/evaluator.py
@@ -110,4 +110,7 @@ def run(self, *args, **kwargs):
"""
See :meth:`onnx.reference.ReferenceEvaluator.run`.
"""
+if len(args) == 1 and isinstance(args[0], list):
+feeds = dict(zip(self.input_names, args[0]))
+return self.run(None, feeds, **kwargs)
return ReferenceEvaluator.run(self, *args, **kwargs)
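
With this change, `ExtendedReferenceEvaluator.run` also accepts a plain list of inputs and zips it with the model's input names before delegating to the usual call. A minimal usage sketch, assuming a trivial MatMul model built only for illustration (the model itself is not part of this PR):

import numpy as np
from onnx import TensorProto
from onnx.helper import (
    make_graph,
    make_model,
    make_node,
    make_opsetid,
    make_tensor_value_info,
)
from onnx_array_api.reference import ExtendedReferenceEvaluator

# One-node model: Z = MatMul(X, Y).
model = make_model(
    make_graph(
        [make_node("MatMul", ["X", "Y"], ["Z"])],
        "g",
        [
            make_tensor_value_info("X", TensorProto.FLOAT, None),
            make_tensor_value_info("Y", TensorProto.FLOAT, None),
        ],
        [make_tensor_value_info("Z", TensorProto.FLOAT, None)],
    ),
    opset_imports=[make_opsetid("", 18)],
)

ref = ExtendedReferenceEvaluator(model)
a = np.arange(4).reshape(-1, 2).astype(np.float32)

# Both calls are equivalent: the list is mapped onto `ref.input_names`.
out_dict = ref.run(None, {"X": a, "Y": a})
out_list = ref.run([a, a])
assert np.array_equal(out_dict[0], out_list[0])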
30 changes: 23 additions & 7 deletions in onnx_array_api/reference/evaluator_yield.py
@@ -1,5 +1,5 @@
from dataclasses import dataclass
-from typing import Any, Dict, List, Iterator, Optional, Tuple
+from typing import Any, Dict, List, Iterator, Optional, Tuple, Union
from enum import IntEnum
import numpy as np
from onnx import ModelProto, TensorProto, ValueInfoProto
@@ -77,6 +77,12 @@ def make_summary(value: Any, length: int = 4, modulo: int = 26) -> str:
:param modulo: discretization parameter
:return: short string
"""
+if isinstance(value, np.float32):
+# This should not happen.
+value = np.array(value)
+assert isinstance(
+value, np.ndarray
+), f"Unexpected type {type(value)} for value, it must be a numpy array."
value4 = np.zeros(length, dtype=np.float64)
if value.size <= length:
value4[: value.size] = value.flatten().astype(np.float64)
@@ -170,6 +176,9 @@ def enumerate_results(
outputs = node.run(*inputs, **linked_attributes)
except Exception:
if raise_exc:
+# ExtendedReferenceEvaluator(self.onnx_model, verbose=10).run(
+# None, feed_inputs
+# )
raise
yield_output = False
break
@@ -286,12 +295,12 @@ def distance_sequence(
:param s2: second sequence
:return: distance and alignment
"""
-delay = self.max_lag
+delay = max(self.max_lag, abs(len(s2) - len(s1)) + 1)
distance = {(-1, -1): 0}
predecessor = {(-1, -1): None}
for i in range(len(s1)):
for j in range(max(0, i - delay), min(len(s2), i + delay)):
-best = 1e100
+best = distance.get((i, j), 1e100)
pred = None
ki, kj = i - 1, j - 1
if (ki, kj) in distance:
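
The wider `delay` matters when the two sequences have very different lengths: with the band bounds used in the loop above, the final cell of the alignment grid is only reachable if the half-width exceeds the length difference. A small sketch of that reachability condition (the helper below is illustrative, not part of the library):

# Cell (i, j) is visited only when max(0, i - delay) <= j < i + delay,
# mirroring `range(max(0, i - delay), min(len(s2), i + delay))` above.
def band_reaches_end(n1: int, n2: int, delay: int) -> bool:
    """Is the last cell of an n1 x n2 alignment grid inside the band?"""
    i, j = n1 - 1, n2 - 1
    return max(0, i - delay) <= j < i + delay

# With the old delay (max_lag only), a large length gap falls outside the band.
assert not band_reaches_end(3, 10, delay=5)
# The new lower bound abs(len(s2) - len(s1)) + 1 always covers the gap.
assert band_reaches_end(3, 10, delay=max(5, abs(10 - 3) + 1))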
@@ -418,7 +427,7 @@ def generate_inputs(model: ModelProto) -> List[np.ndarray]:
def compare_onnx_execution(
model1: ModelProto,
model2: ModelProto,
-inputs: Optional[List[Any]] = None,
+inputs: Optional[Union[List[Any], Tuple[Dict[str, Any]]]] = None,
verbose: int = 0,
raise_exc: bool = True,
) -> Tuple[List[ResultExecution], List[ResultExecution], List[Tuple[int, int]]]:
@@ -430,7 +439,8 @@ def compare_onnx_execution(

:param model1: first model
:param model2: second model
-:param inputs: inputs to use
+:param inputs: inputs to use, a list of inputs if both models have
+the same number of inputs or two dictionaries, one for each model
:param verbose: verbosity
:param raise_exc: raise exception if the execution fails or stop at the error
:return: four results, a sequence of results for the first model and the second model,
@@ -440,8 +450,14 @@
print("[compare_onnx_execution] generate inputs")
if inputs is None:
inputs = generate_inputs(model1)
-feeds1 = {i.name: v for i, v in zip(model1.graph.input, inputs)}
-feeds2 = {i.name: v for i, v in zip(model2.graph.input, inputs)}
+if isinstance(inputs, tuple):
+assert len(inputs) == 2, f"Unexpected number {len(inputs)} of inputs."
+feeds1, feeds2 = inputs
+else:
+feeds1 = {i.name: v for i, v in zip(model1.graph.input, inputs)}
+feeds2 = {i.name: v for i, v in zip(model2.graph.input, inputs)}
+assert isinstance(feeds1, dict), f"Unexpected type {type(feeds1)} for inputs"
+assert isinstance(feeds2, dict), f"Unexpected type {type(feeds2)} for inputs"
if verbose:
print(f"[compare_onnx_execution] got {len(inputs)} inputs")
print("[compare_onnx_execution] execute first model")
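`compare_onnx_execution` therefore now accepts either a single list of arrays shared by both models or a tuple of two feed dictionaries, which is useful when the models do not share input names. A short usage sketch with two throwaway one-node models (the `Abs` models and their names are illustrative, not part of this PR):

import numpy as np
from onnx import TensorProto
from onnx.helper import (
    make_graph,
    make_model,
    make_node,
    make_opsetid,
    make_tensor_value_info,
)
from onnx_array_api.reference.evaluator_yield import compare_onnx_execution


def make_abs_model(input_name: str, output_name: str):
    # One-node model: output = Abs(input).
    return make_model(
        make_graph(
            [make_node("Abs", [input_name], [output_name])],
            "g",
            [make_tensor_value_info(input_name, TensorProto.FLOAT, None)],
            [make_tensor_value_info(output_name, TensorProto.FLOAT, None)],
        ),
        opset_imports=[make_opsetid("", 18)],
    )


model1 = make_abs_model("X", "Y")
model2 = make_abs_model("X2", "Y2")
x = np.random.rand(3, 4).astype(np.float32)

# The models have different input names, so a single list cannot feed both;
# a tuple of two dictionaries, one per model, can.
results = compare_onnx_execution(model1, model2, inputs=({"X": x}, {"X2": x}))
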
14 changes: 9 additions & 5 deletions in onnx_array_api/reference/ops/op_fused_matmul.py
@@ -22,10 +22,14 @@ def _run(
transBatchB == 0
), f"Not implemented for transBatchB==1 and {A.shape}x{B.shape}"
if transA:
-dim = len(A.shape)
-A = A.transpose(axes=(dim - 2, dim - 1))
+perm = list(range(len(A.shape)))
+dim = len(perm)
+perm[dim - 2], perm[dim - 1] = perm[dim - 1], perm[dim - 2]
+A = np.transpose(A, perm)
if transB:
-dim = len(B.shape)
-B = B.transpose(axes=(dim - 2, dim - 1))
+perm = list(range(len(B.shape)))
+dim = len(perm)
+perm[dim - 2], perm[dim - 1] = perm[dim - 1], perm[dim - 2]
+B = np.transpose(B, perm)
a = np.array(alpha, dtype=A.dtype)
-return (A @ B * a,)
+return (np.matmul(A, B) * a,)
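
The replacement builds an explicit permutation and swaps its last two entries, so only the trailing two axes are transposed and any leading batch axes are left in place. A small standalone sketch of the same idea (the helper name is illustrative):

import numpy as np


def swap_last_two_axes(x: np.ndarray) -> np.ndarray:
    """Transpose only the last two dimensions, keeping batch axes untouched."""
    perm = list(range(x.ndim))
    perm[-2], perm[-1] = perm[-1], perm[-2]
    return np.transpose(x, perm)


a = np.arange(6).reshape(2, 3)       # 2-D: same as a.T
assert np.array_equal(swap_last_two_axes(a), a.T)

b = np.arange(24).reshape(2, 3, 4)   # 3-D: the leading batch axis is preserved
assert swap_last_two_axes(b).shape == (2, 4, 3)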
