Arm backend: Add VGF tests to add op unit tests #12058

Open · wants to merge 1 commit into main

33 changes: 33 additions & 0 deletions backends/arm/test/common.py
@@ -89,6 +89,39 @@ def get_tosa_compile_spec_unbuilt(
return compile_spec_builder


def get_vgf_compile_spec(
tosa_spec: str | TosaSpecification,
compiler_flags: Optional[str] = "",
custom_path=None,
) -> list[CompileSpec]:
"""
Default compile spec for VGF tests.
"""
return get_vgf_compile_spec_unbuilt(tosa_spec, compiler_flags, custom_path).build()


def get_vgf_compile_spec_unbuilt(
tosa_spec: str | TosaSpecification,
compiler_flags: Optional[str] = "",
custom_path=None,
) -> ArmCompileSpecBuilder:
"""Get the ArmCompileSpecBuilder for the default VGF tests, to modify
the compile spec before calling .build() to finalize it.
"""
if not custom_path:
custom_path = maybe_get_tosa_collate_path()

if custom_path is not None:
os.makedirs(custom_path, exist_ok=True)
compile_spec_builder = (
ArmCompileSpecBuilder()
.vgf_compile_spec(tosa_spec, compiler_flags)
.dump_intermediate_artifacts_to(custom_path)
)

return compile_spec_builder


def get_u55_compile_spec(
macs: int = 128,
system_config: str = "Ethos_U55_High_End_Embedded",
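For reference, a minimal sketch of how the two new helpers above are intended to be called from a test. The TOSA version string, compiler flags, and dump path are placeholders, not values taken from this PR:

```python
# Illustrative call pattern only; all literal values below are placeholders.
from executorch.backends.arm.test import common

# Fully built spec, ready to hand to a pipeline or partitioner.
compile_spec = common.get_vgf_compile_spec("TOSA-1.0+FP")

# Unbuilt variant, for tests that need to adjust the builder before building.
builder = common.get_vgf_compile_spec_unbuilt(
    "TOSA-1.0+FP",
    compiler_flags="",                  # converter-specific flags, if any
    custom_path="/tmp/vgf_artifacts",   # hypothetical dump directory
)
compile_spec = builder.build()
```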
26 changes: 26 additions & 0 deletions backends/arm/test/ops/test_add.py
@@ -7,6 +7,8 @@

from typing import Tuple

import pytest

import torch
from executorch.backends.arm.arm_backend import get_tosa_spec
from executorch.backends.arm.quantizer import arm_quantizer
@@ -16,6 +18,7 @@
EthosU85PipelineBI,
TosaPipelineBI,
TosaPipelineMI,
VgfPipeline,
)
from executorch.backends.arm.tosa_specification import TosaSpecification
from executorch.backends.xnnpack.test.tester import Quantize
@@ -184,3 +187,26 @@ def test_add_tensor_u85_BI_2(test_data: input_t2):
Add2(), test_data(), aten_op, exir_op, run_on_fvp=True
)
pipeline.run()


@common.parametrize("test_data", Add.test_data)
@pytest.mark.skip(reason="Model converter not yet made available")
def test_add_tensor_vgf_fp(test_data: input_t1):
pipeline = VgfPipeline[input_t1](
Add(), test_data(), aten_op, exir_op, tosa_version="TOSA-1.0+FP"
)
pipeline.run()


@common.parametrize("test_data", Add.test_data)
@pytest.mark.skip(reason="Model converter not yet made available")
def test_add_tensor_vgf_int(test_data: input_t1):
pipeline = VgfPipeline[input_t1](
Add(),
test_data(),
aten_op,
exir_op,
tosa_version="TOSA-1.0+INT",
symmetric_io_quantization=True,
)
pipeline.run()
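
Both new tests are skipped unconditionally until the converter ships. A possible follow-up, sketched here as a suggestion rather than as part of this patch, is to gate the skip on whether the `model-converter` binary (see the rename in vgf_backend.py below) is actually on PATH. The test name is hypothetical; the other names are the ones already defined in this file:

```python
# Suggestion only: skip based on tool availability instead of unconditionally.
import shutil

import pytest

model_converter_missing = shutil.which("model-converter") is None


@common.parametrize("test_data", Add.test_data)
@pytest.mark.skipif(
    model_converter_missing, reason="model-converter not found on PATH"
)
def test_add_tensor_vgf_fp_when_available(test_data: input_t1):  # hypothetical name
    pipeline = VgfPipeline[input_t1](
        Add(), test_data(), aten_op, exir_op, tosa_version="TOSA-1.0+FP"
    )
    pipeline.run()
```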
8 changes: 8 additions & 0 deletions backends/arm/test/tester/arm_tester.py
@@ -36,6 +36,7 @@
get_tosa_spec,
is_ethosu,
is_tosa,
is_vgf,
)
from executorch.backends.arm.ethosu_partitioner import EthosUPartitioner
from executorch.backends.arm.quantizer import (
@@ -61,6 +62,8 @@
from executorch.backends.arm.tosa_partitioner import TOSAPartitioner
from executorch.backends.arm.tosa_specification import TosaSpecification

from executorch.backends.arm.vgf_partitioner import VgfPartitioner

from executorch.backends.test.harness.stages import Stage, StageType
from executorch.backends.xnnpack.test.tester import Tester
from executorch.devtools.backend_debug import get_delegation_info
@@ -384,6 +387,11 @@ def to_edge_transform_and_lower(
compile_spec=self.compile_spec,
additional_checks=additional_checks,
)
elif is_vgf(self.compile_spec):
arm_partitioner = VgfPartitioner(
compile_spec=self.compile_spec,
additional_checks=additional_checks,
)
else:
raise ValueError("compile spec doesn't target any Arm Partitioner")
partitioners = [arm_partitioner]
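For orientation, the partitioner selection in to_edge_transform_and_lower now reduces to roughly the following shape. This is a condensed restatement, not code in the PR; the import paths mirror the surrounding imports, and the exact branch order and keyword arguments in arm_tester.py may differ:

```python
# Condensed restatement of the dispatch logic; branch order and import paths
# are assumptions based on the imports visible in this diff.
from executorch.backends.arm.arm_backend import is_ethosu, is_tosa, is_vgf
from executorch.backends.arm.ethosu_partitioner import EthosUPartitioner
from executorch.backends.arm.tosa_partitioner import TOSAPartitioner
from executorch.backends.arm.vgf_partitioner import VgfPartitioner


def select_arm_partitioner(compile_spec, additional_checks=None):
    """Pick the Arm partitioner that matches the compile spec (sketch only)."""
    if is_tosa(compile_spec):
        return TOSAPartitioner(
            compile_spec=compile_spec, additional_checks=additional_checks
        )
    if is_ethosu(compile_spec):
        return EthosUPartitioner(
            compile_spec=compile_spec, additional_checks=additional_checks
        )
    if is_vgf(compile_spec):
        return VgfPartitioner(
            compile_spec=compile_spec, additional_checks=additional_checks
        )
    raise ValueError("compile spec doesn't target any Arm Partitioner")
```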
121 changes: 121 additions & 0 deletions backends/arm/test/tester/test_pipeline.py
@@ -25,6 +25,7 @@
EthosUQuantizer,
get_symmetric_quantization_config,
TOSAQuantizer,
VgfQuantizer,
)
from executorch.backends.arm.test import common, conftest
from executorch.backends.arm.test.tester.arm_tester import ArmTester, RunPasses
@@ -827,3 +828,123 @@ def __init__(
},
)
self.pop_stage("to_executorch")


class VgfPipeline(BasePipelineMaker, Generic[T]):
"""
    Lowers a graph based on a TOSA spec (with or without quantization) and
    converts the resulting TOSA to VGF.

Attributes:
module: The module which the pipeline is applied to.
test_data: Data used for quantizing and testing the module.

        aten_op: Aten dialect ops expected to be found in the graph after export.
        exir_op: Exir dialect ops expected to be found in the graph after to_edge
            if not using use_to_edge_transform_and_lower.

run_on_vulkan_runtime: Not yet supported.

vgf_compiler_flags: Optional compiler flags.

tosa_version: A string for identifying the TOSA version.

        use_to_edge_transform_and_lower: Selects between two possible ways of lowering the module.
        custom_path: Path to dump intermediate artifacts such as tosa and pte to.
"""

def __init__(
self,
module: torch.nn.Module,
test_data: T,
aten_op: str | List[str],
exir_op: Optional[str | List[str]] = None,
run_on_vulkan_runtime: bool = False,
vgf_compiler_flags: Optional[str] = "",
tosa_version: str = "TOSA-1.0+FP",
symmetric_io_quantization: bool = False,
per_channel_quantization: bool = False,
use_to_edge_transform_and_lower: bool = True,
        custom_path: Optional[str] = None,
atol: float = 1e-03,
rtol: float = 1e-03,
qtol: int = 1,
dynamic_shapes: Optional[Tuple[Any]] = None,
):

if (
symmetric_io_quantization or per_channel_quantization
) and tosa_version == "TOSA-1.0+FP":
raise ValueError("Dont configure quantization with FP TOSA profile.")
if (
symmetric_io_quantization is False
and per_channel_quantization is False
and tosa_version == "TOSA-1.0+INT"
):
raise ValueError("Missing quantization options for INT TOSA profile.")

tosa_profile = TosaSpecification.create_from_string(tosa_version)
compile_spec = common.get_vgf_compile_spec(
tosa_profile, compiler_flags=vgf_compiler_flags, custom_path=custom_path
)

super().__init__(
module,
test_data,
aten_op,
compile_spec,
exir_op,
use_to_edge_transform_and_lower,
dynamic_shapes,
)

if symmetric_io_quantization or per_channel_quantization:
quantizer = VgfQuantizer(compile_spec)
quantization_config = get_symmetric_quantization_config(
is_per_channel=per_channel_quantization
)
if symmetric_io_quantization:
quantizer.set_io(quantization_config)
quant_stage = Quantize(quantizer, quantization_config)
else:
quant_stage = None

if quant_stage:
self.add_stage(self.tester.quantize, quant_stage, pos=0)

self.add_stage_after(
"quantize",
self.tester.check,
[
"torch.ops.quantized_decomposed.dequantize_per_tensor.default",
"torch.ops.quantized_decomposed.quantize_per_tensor.default",
],
suffix="quant_nodes",
)

remove_quant_nodes_stage = (
"to_edge_transform_and_lower"
if use_to_edge_transform_and_lower
else "partition"
)
self.add_stage_after(
remove_quant_nodes_stage,
self.tester.check_not,
[
"torch.ops.quantized_decomposed.dequantize_per_tensor.default",
"torch.ops.quantized_decomposed.quantize_per_tensor.default",
],
suffix="quant_nodes",
)
else:
self.add_stage_after(
"export",
self.tester.check_not,
[
"torch.ops.quantized_decomposed.dequantize_per_tensor.default",
"torch.ops.quantized_decomposed.quantize_per_tensor.default",
],
suffix="quant_nodes",
)

if run_on_vulkan_runtime:
pass
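
A quick illustration of the argument validation at the top of VgfPipeline.__init__, written as it could appear inside one of the parametrized tests in test_add.py (Add, input_t1, aten_op, exir_op, and test_data are assumed to be in scope there):

```python
# Sketch of the profile/quantization validation; names come from test_add.py.
import pytest

# The FP profile rejects quantization options...
with pytest.raises(ValueError):
    VgfPipeline[input_t1](
        Add(),
        test_data(),
        aten_op,
        exir_op,
        tosa_version="TOSA-1.0+FP",
        symmetric_io_quantization=True,
    )

# ...and the INT profile requires at least one quantization option.
with pytest.raises(ValueError):
    VgfPipeline[input_t1](
        Add(), test_data(), aten_op, exir_op, tosa_version="TOSA-1.0+INT"
    )
```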
2 changes: 1 addition & 1 deletion backends/arm/vgf_backend.py
@@ -103,7 +103,7 @@ def vgf_compile(
additional_flags = " ".join(compile_flags)
vgf_path = tosa_path + ".vgf"
conversion_command = (
f"converter-backend {additional_flags} -i {tosa_path} -o {vgf_path}"
f"model-converter {additional_flags} -i {tosa_path} -o {vgf_path}"
)
try:
subprocess.run(
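The only functional change in this file is the converter binary name. A rough sketch of the resulting invocation is below; the paths and flags are placeholders, and the exact subprocess call in vgf_backend.py is truncated in this diff, so the `run` arguments here are an assumption:

```python
# Sketch of the conversion step after the rename; values are placeholders.
import shutil
import subprocess

tosa_path = "intermediates/add.tosa"   # hypothetical intermediate artifact
vgf_path = tosa_path + ".vgf"
additional_flags = ""                  # joined from compile_flags in the real code

# Optional preflight check that the renamed tool is available.
if shutil.which("model-converter") is None:
    raise RuntimeError("model-converter not found on PATH")

conversion_command = f"model-converter {additional_flags} -i {tosa_path} -o {vgf_path}"
subprocess.run(conversion_command.split(), check=True)
```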