Skip to content
This repository was archived by the owner on Jul 24, 2024. It is now read-only.

Commit 3e6e8d9

Browse files
authored
Added xsmm backend for torch_mlir compiler (#70)
Signed-off-by: Gregory Shimansky <[email protected]>
1 parent 122b21f commit 3e6e8d9

File tree

4 files changed

+9
-12
lines changed

4 files changed

+9
-12
lines changed

.github/workflows/test-single-config.yml

+1
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@ on:
2121
- torch
2222
- dynamo
2323
- torch_mlir
24+
- torch_mlir_xsmm
2425
- torchscript
2526
- torchscript_onednn
2627
- ipex

.github/workflows/test.yml

+2-10
Original file line numberDiff line numberDiff line change
@@ -40,7 +40,8 @@ jobs:
4040
{device: 'cpu', compiler: 'ipex'},
4141
{device: 'cpu', compiler: 'ipex_onednn_graph'},
4242
# {device: 'xpu', compiler: 'ipex'},
43-
{device: 'cpu', compiler: 'torch_mlir'}
43+
{device: 'cpu', compiler: 'torch_mlir'},
44+
{device: 'cpu', compiler: 'torch_mlir_xsmm'}
4445
]
4546
test_script: ${{ fromJson(inputs.test_scripts) }}
4647
fail-fast: false
@@ -56,12 +57,3 @@ jobs:
5657
test_script: ${{ matrix.test_script }}
5758
secrets:
5859
DB_URL: ${{ secrets.DB_URL }}
59-
60-
shutdown:
61-
needs: mlp_test
62-
if: ${{ contains(inputs.runner_type, 'amd') }} && inputs.shutdown_cloud_runner
63-
runs-on: ${{ inputs.runner_type }}
64-
steps:
65-
- name: shutdown
66-
shell: bash -el {0}
67-
run: sudo shutdown -h +2

dl_bench/cli/launcher.py

+1
Original file line numberDiff line numberDiff line change
@@ -85,6 +85,7 @@ def parse_args():
8585
"ipex",
8686
"ipex_onednn_graph",
8787
"torch_mlir",
88+
"torch_mlir_xsmm",
8889
],
8990
help="Compilation mode to use. No compilation by default.",
9091
)

dl_bench/utils.py

+5-2
Original file line numberDiff line numberDiff line change
@@ -268,7 +268,7 @@ def _compile_model(compile_mode: str, device, model: Module, sample_input, dtype
268268

269269
compiled_model = dynamo.optimize(be.refbackend_torchdynamo_backend)(model)
270270
print("Compiled with torch_mlir (torchscript, inference)")
271-
elif compile_mode == "torch_mlir":
271+
elif compile_mode == "torch_mlir" or compile_mode == "torch_mlir_xsmm":
272272
from torch_mlir._dynamo_fx_importer import import_fx_graph_as_func
273273
from torch_mlir_e2e_test.configs.torchdynamo import jit
274274
from torch_mlir_e2e_test.framework import TestOptions
@@ -277,6 +277,9 @@ def _compile_model(compile_mode: str, device, model: Module, sample_input, dtype
277277
from torch_mlir_e2e_test.linalg_on_tensors_backends.cpuprotobackend import (
278278
CpuProtoLinalgOnTensorsBackend,
279279
)
280+
from torch_mlir_e2e_test.linalg_on_tensors_backends.xsmmprotobackend import (
281+
XsmmProtoLinalgOnTensorsBackend,
282+
)
280283
import torch.utils._pytree as pytree
281284

282285
# debug_timer seems to cause problems:
@@ -290,7 +293,7 @@ def _compile_model(compile_mode: str, device, model: Module, sample_input, dtype
290293
opts,
291294
output_type="linalg-on-tensors",
292295
)
293-
backend = CpuProtoLinalgOnTensorsBackend(opts)
296+
backend = CpuProtoLinalgOnTensorsBackend(opts) if compile_mode == "torch_mlir" else XsmmProtoLinalgOnTensorsBackend(opts)
294297
# backend = RefBackendLinalgOnTensorsBackend()
295298
module = backend.compile(module)
296299
backend_module = backend.load(module)

0 commit comments

Comments (0)