diff --git a/.circleci/config.yml b/.circleci/config.yml
index 61774beb06..e02083434f 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -33,7 +33,7 @@ workflows:
third_party/.* lint_only false
tools/.* lint_only false
setup.py lint_only false
- base-revision: dev-1.x
+ base-revision: main
# this is the path of the configuration we should trigger once
# path filtering and pipeline parameter value updates are
# complete. In this case, we are using the parent dynamic
diff --git a/.circleci/test.yml b/.circleci/test.yml
index 12209b062b..a94dcb4d74 100644
--- a/.circleci/test.yml
+++ b/.circleci/test.yml
@@ -84,8 +84,7 @@ commands:
command: |
cd mmdeploy
python -m mim install -r requirements/codebases.txt
- python -m pip install -r requirements/tests.txt
- python -m pip install -r requirements/runtime.txt
+ python -m pip install -r requirements.txt
python -m pip install -U numpy clip numba
cd ..
perform_model_converter_ut:
@@ -145,8 +144,7 @@ jobs:
command: |
docker exec mmdeploy pip install onnxruntime==1.8.1
docker exec mmdeploy mim install -r mmdeploy/requirements/codebases.txt
- docker exec mmdeploy pip install -r mmdeploy/requirements/tests.txt
- docker exec mmdeploy pip install -r mmdeploy/requirements/runtime.txt
+ docker exec mmdeploy pip install -r mmdeploy/requirements.txt
docker exec mmdeploy pip install -U numpy clip numba
- run:
name: Perform Model Converter unittests
@@ -272,11 +270,3 @@ workflows:
version: 1.8.1
requires:
- lint
- - hold:
- type: approval
- requires:
- - test_linux_onnxruntime
- - test_windows_onnxruntime
- - test_linux_tensorrt:
- requires:
- - hold
diff --git a/.github/ISSUE_TEMPLATE/1-bug-report.yml b/.github/ISSUE_TEMPLATE/1-bug-report.yml
index b1ed89775e..85f7d05566 100644
--- a/.github/ISSUE_TEMPLATE/1-bug-report.yml
+++ b/.github/ISSUE_TEMPLATE/1-bug-report.yml
@@ -9,7 +9,7 @@ body:
label: Checklist
options:
- label: I have searched related issues but cannot get the expected help.
- - label: 2. I have read the [FAQ documentation](https://github.com/open-mmlab/mmdeploy/tree/1.x/docs/en/faq.md) but cannot get the expected help.
+ - label: 2. I have read the [FAQ documentation](https://github.com/open-mmlab/mmdeploy/tree/main/docs/en/faq.md) but cannot get the expected help.
- label: 3. The bug has not been fixed in the latest version.
- type: textarea
attributes:
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
index 94287d30f6..fd41553b89 100644
--- a/.github/ISSUE_TEMPLATE/config.yml
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -2,7 +2,7 @@ blank_issues_enabled: false
contact_links:
- name: 💥 FAQ
- url: https://github.com/open-mmlab/mmdeploy/tree/1.x/docs/en/faq.md
+ url: https://github.com/open-mmlab/mmdeploy/tree/main/docs/en/faq.md
about: Check if your issue already has solutions
- name: 💬 Forum
url: https://github.com/open-mmlab/mmdeploy/discussions
diff --git a/.github/workflows/backend-ascend.yml b/.github/workflows/backend-ascend.yml
index 9eb7b9a649..f5817af983 100644
--- a/.github/workflows/backend-ascend.yml
+++ b/.github/workflows/backend-ascend.yml
@@ -18,7 +18,7 @@ concurrency:
jobs:
build_sdk_demo:
- runs-on: ubuntu-18.04
+ runs-on: ubuntu-20.04
strategy:
matrix:
python-version: [3.7]
@@ -37,9 +37,7 @@ jobs:
run: |
sudo apt update
sudo apt install -y ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libxrender-dev libc++1-9 libc++abi1-9
- sudo add-apt-repository ppa:ignaciovizzo/opencv3-nonfree
sudo apt install libopencv-dev
- pkg-config --libs opencv
- name: Install Ascend Toolkit
run: |
mkdir -p $GITHUB_WORKSPACE/Ascend
diff --git a/.github/workflows/backend-ncnn.yml b/.github/workflows/backend-ncnn.yml
index 15b1a503a8..2fb2bb8ea7 100644
--- a/.github/workflows/backend-ncnn.yml
+++ b/.github/workflows/backend-ncnn.yml
@@ -18,7 +18,7 @@ concurrency:
jobs:
test_onnx2ncnn:
- runs-on: ubuntu-18.04
+ runs-on: ubuntu-20.04
strategy:
matrix:
python-version: [3.7]
@@ -31,13 +31,11 @@ jobs:
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- - name: Install unittest dependencies
+ - name: Install dependencies
run: |
+ sudo apt update
+ sudo apt install wget gcc-multilib g++-multilib libprotobuf-dev protobuf-compiler
python -m pip install cmake onnx
- - name: update
- run: sudo apt update
- - name: gcc-multilib
- run: sudo apt install gcc-multilib g++-multilib wget libprotobuf-dev protobuf-compiler
- name: Install ncnn
run: |
wget https://github.com/Tencent/ncnn/archive/refs/tags/20220420.tar.gz
@@ -60,7 +58,7 @@ jobs:
echo $(pwd)
ln -s build/bin/mmdeploy_onnx2ncnn ./
python .github/scripts/test_onnx2ncnn.py --run 1
- script_install:
+ build_ncnn:
runs-on: ubuntu-20.04
strategy:
matrix:
@@ -78,5 +76,5 @@ jobs:
run: |
python -m pip install torch==1.8.2 torchvision==0.9.2 --extra-index-url https://download.pytorch.org/whl/lts/1.8/cpu
python -m pip install mmcv-lite
- python tools/scripts/build_ubuntu_x64_ncnn.py
+ python tools/scripts/build_ubuntu_x64_ncnn.py 8
python -c 'import mmdeploy.apis.ncnn as ncnn_api; assert ncnn_api.is_available(with_custom_ops=True)'
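The backend workflows all finish with an import-and-assert availability probe like the line above. A short sketch generalizing that check across backends, assuming only the `mmdeploy.apis.<backend>` modules already used in these workflows:

```python
# Probe several backends the way the workflows do; a backend that is not
# installed reports unavailable instead of crashing the import loop.
import importlib

for name in ("onnxruntime", "ncnn", "pplnn"):
    try:
        api = importlib.import_module(f"mmdeploy.apis.{name}")
    except ImportError:
        print(f"{name}: module not importable")
        continue
    print(f"{name}: available={api.is_available()}")
```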
diff --git a/.github/workflows/backend-ort.yml b/.github/workflows/backend-ort.yml
index c36a3ad0a8..d296270c7b 100644
--- a/.github/workflows/backend-ort.yml
+++ b/.github/workflows/backend-ort.yml
@@ -17,7 +17,7 @@ concurrency:
cancel-in-progress: true
jobs:
- script_install:
+ build_ort:
runs-on: ubuntu-20.04
strategy:
matrix:
@@ -35,7 +35,7 @@ jobs:
run: |
python -m pip install torch==1.8.2 torchvision==0.9.2 --extra-index-url https://download.pytorch.org/whl/lts/1.8/cpu
python -m pip install mmcv-lite openmim
- python tools/scripts/build_ubuntu_x64_ort.py
+ python tools/scripts/build_ubuntu_x64_ort.py 8
python -c 'import mmdeploy.apis.onnxruntime as ort_api; assert ort_api.is_available(with_custom_ops=True)'
- name: test mmcls full pipeline
run: |
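The renamed jobs now pass a trailing `8` to the `build_ubuntu_x64_*.py` scripts; the docs elsewhere pass `$(nproc)`, so this appears to be a parallel-job count. A purely hypothetical sketch of how such a script might consume that argument (the real scripts may differ):

```python
# Hypothetical: read an optional job count from argv, defaulting to the
# machine's CPU count, and forward it to the underlying cmake build.
import multiprocessing
import subprocess
import sys

jobs = int(sys.argv[1]) if len(sys.argv) > 1 else multiprocessing.cpu_count()
subprocess.run(["cmake", "--build", "build", "--", f"-j{jobs}"], check=True)
```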
diff --git a/.github/workflows/backend-pplnn.yml b/.github/workflows/backend-pplnn.yml
index a6cda40936..c9daeb2712 100644
--- a/.github/workflows/backend-pplnn.yml
+++ b/.github/workflows/backend-pplnn.yml
@@ -17,23 +17,47 @@ concurrency:
cancel-in-progress: true
jobs:
- script_install:
- runs-on: ubuntu-18.04
- strategy:
- matrix:
- python-version: [3.7]
+ build_pplnn_cuda:
+ runs-on: [self-hosted, linux-3090]
+ container:
+ image: openmmlab/mmdeploy:ubuntu20.04-cuda11.3
+ options: "--gpus=all --ipc=host"
steps:
- name: Checkout repository
uses: actions/checkout@v3
with:
submodules: 'recursive'
- - name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v2
+ - name: Install dependencies
+ run: |
+ apt update && apt install unzip wget
+ python3 -V
+ python3 -m pip install openmim numpy
+ python3 -m pip install -r requirements.txt
+ python3 -m mim install $(cat requirements/codebases.txt | grep mmcls)
+ python3 -m pip list
+ - name: Build SDK
+ run: |
+ bash .circleci/scripts/linux/build.sh "cuda" "pplnn" \
+ -Dpplcv_DIR=${pplcv_DIR} \
+ -Dpplnn_DIR=${pplnn_DIR}
+ ls build/lib
+ - name: Install mmdeploy with pplnn
+ run: |
+ rm -rf .eggs && python3 -m pip install -e .
+ export LD_LIBRARY_PATH="/root/workspace/mmdeploy/build/lib:${LD_LIBRARY_PATH}"
+ python3 tools/check_env.py
+ python3 -c 'import mmdeploy.apis.pplnn as pplnn_api; assert pplnn_api.is_available()'
+
+ build_pplnn_cpu:
+ runs-on: ubuntu-20.04
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v3
with:
- python-version: ${{ matrix.python-version }}
- - name: Install mmdeploy
+ submodules: 'recursive'
+ - name: Install mmdeploy with pplnn
run: |
python -m pip install torch==1.8.2 torchvision==0.9.2 --extra-index-url https://download.pytorch.org/whl/lts/1.8/cpu
- python -m pip install mmcv-lite
- python tools/scripts/build_ubuntu_x64_pplnn.py
+ python -m pip install mmcv-lite protobuf==3.20.2
+ python tools/scripts/build_ubuntu_x64_pplnn.py 8
python -c 'import mmdeploy.apis.pplnn as pplnn_api; assert pplnn_api.is_available()'
diff --git a/.github/workflows/backend-rknn.yml b/.github/workflows/backend-rknn.yml
index 01a8e3a7b5..4a8b804c80 100644
--- a/.github/workflows/backend-rknn.yml
+++ b/.github/workflows/backend-rknn.yml
@@ -19,7 +19,7 @@ concurrency:
jobs:
build_rknpu2:
- runs-on: ubuntu-18.04
+ runs-on: ubuntu-20.04
steps:
- name: Checkout repository
uses: actions/checkout@v3
@@ -31,7 +31,7 @@ jobs:
run: |
sh -x tools/scripts/ubuntu_cross_build_rknn.sh rk3588
build_rknpu:
- runs-on: ubuntu-18.04
+ runs-on: ubuntu-20.04
steps:
- name: Checkout repository
uses: actions/checkout@v3
diff --git a/.github/workflows/backend-snpe.yml b/.github/workflows/backend-snpe.yml
index b74de51fcd..1fec2afffd 100644
--- a/.github/workflows/backend-snpe.yml
+++ b/.github/workflows/backend-snpe.yml
@@ -18,7 +18,7 @@ concurrency:
jobs:
build_sdk_demo:
- runs-on: ubuntu-18.04
+ runs-on: ubuntu-20.04
steps:
- name: Checkout repository
uses: actions/checkout@v3
@@ -31,9 +31,7 @@ jobs:
sudo apt install wget libprotobuf-dev protobuf-compiler
sudo apt update
sudo apt install -y ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libxrender-dev libc++1-9 libc++abi1-9
- sudo add-apt-repository ppa:ignaciovizzo/opencv3-nonfree
sudo apt install libopencv-dev
- pkg-config --libs opencv
- name: Install snpe
run: |
wget https://media.githubusercontent.com/media/tpoisonooo/mmdeploy_snpe_testdata/main/snpe-1.59.tar.gz
diff --git a/.github/workflows/backend-torchscript.yml b/.github/workflows/backend-torchscript.yml
index 2cc149e743..56cb701b4e 100644
--- a/.github/workflows/backend-torchscript.yml
+++ b/.github/workflows/backend-torchscript.yml
@@ -1,4 +1,4 @@
-name: backend-ort
+name: backend-torchscript
on:
push:
@@ -17,8 +17,8 @@ concurrency:
cancel-in-progress: true
jobs:
- script_install:
- runs-on: ubuntu-18.04
+ build_torchscript:
+ runs-on: ubuntu-20.04
strategy:
matrix:
python-version: [3.7]
@@ -33,5 +33,7 @@ jobs:
python-version: ${{ matrix.python-version }}
- name: Install mmdeploy
run: |
+ python -m pip install torch==1.11.0 torchvision==0.12.0 --extra-index-url https://download.pytorch.org/whl/cpu
python -m pip install mmcv-lite
- python tools/scripts/build_ubuntu_x64_torchscript.py
+ python tools/scripts/build_ubuntu_x64_torchscript.py 8
+ python -c 'from mmdeploy.backend.torchscript import is_available; assert is_available()'
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 5a9afee1bc..14f05e3a7e 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -22,7 +22,7 @@ concurrency:
jobs:
build_cpu_model_convert:
- runs-on: ubuntu-18.04
+ runs-on: ubuntu-20.04
strategy:
matrix:
python-version: [3.7]
@@ -45,10 +45,11 @@ jobs:
- name: Install unittest dependencies
run: |
python -m pip install openmim
- python -m pip install -r requirements.txt -r requirements/backends.txt
+ python -m pip install -r requirements.txt
+ python -m pip install -r requirements/backends.txt
python -m mim install "mmcv>=2.0.0rc1"
python -m mim install -r requirements/codebases.txt
- python -m pip install -U numpy clip
+ python -m pip install -U numpy clip numba
python -m pip list
- name: Install mmyolo
run: |
@@ -70,7 +71,7 @@ jobs:
pytest tests/test_deploy
build_cpu_sdk:
- runs-on: ubuntu-18.04
+ runs-on: ubuntu-20.04
steps:
- name: Checkout repository
uses: actions/checkout@v3
@@ -83,13 +84,11 @@ jobs:
sudo apt install gcc-multilib g++-multilib wget libprotobuf-dev protobuf-compiler
sudo apt update
sudo apt install -y ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libxrender-dev libc++1-9 libc++abi1-9
- sudo add-apt-repository ppa:ignaciovizzo/opencv3-nonfree
sudo apt install libopencv-dev lcov wget
- pkg-config --libs opencv
- name: Build and run SDK unit test without backend
run: |
mkdir -p build && pushd build
- cmake .. -DCMAKE_CXX_COMPILER=g++-7 -DMMDEPLOY_CODEBASES=all -DMMDEPLOY_BUILD_SDK=ON -DMMDEPLOY_BUILD_SDK_PYTHON_API=OFF -DMMDEPLOY_TARGET_DEVICES=cpu -DMMDEPLOY_COVERAGE=ON -DMMDEPLOY_BUILD_TEST=ON
+ cmake .. -DCMAKE_CXX_COMPILER=g++ -DMMDEPLOY_CODEBASES=all -DMMDEPLOY_BUILD_SDK=ON -DMMDEPLOY_BUILD_SDK_PYTHON_API=OFF -DMMDEPLOY_TARGET_DEVICES=cpu -DMMDEPLOY_COVERAGE=ON -DMMDEPLOY_BUILD_TEST=ON
make -j2
mkdir -p mmdeploy_test_resources/transform
cp ../tests/data/tiger.jpeg mmdeploy_test_resources/transform/
@@ -116,14 +115,13 @@ jobs:
sh -x tools/scripts/ubuntu_cross_build_aarch64.sh
build_cuda102:
- runs-on: ubuntu-18.04
+ runs-on: ubuntu-20.04
container:
image: pytorch/pytorch:1.9.0-cuda10.2-cudnn7-devel
env:
FORCE_CUDA: 1
strategy:
matrix:
- python-version: [3.7]
torch: [1.9.0+cu102]
include:
- torch: 1.9.0+cu102
@@ -131,27 +129,27 @@ jobs:
torchvision: 0.10.0+cu102
steps:
- uses: actions/checkout@v2
- - name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v2
- with:
- python-version: ${{ matrix.python-version }}
- name: Install system dependencies
run: |
apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A4B469963BF863CC
- apt-get update && apt-get install -y ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libxrender-dev python${{matrix.python-version}}-dev
+ apt-get update && apt-get install -y ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libxrender-dev
apt-get clean
rm -rf /var/lib/apt/lists/*
- name: Install PyTorch
- run: python -m pip install torch==${{matrix.torch}} torchvision==${{matrix.torchvision}} -f https://download.pytorch.org/whl/torch_stable.html
+ run: |
+ python -V
+ python -m pip install --upgrade pip
+ python -m pip install torch==${{matrix.torch}} torchvision==${{matrix.torchvision}} -f https://download.pytorch.org/whl/torch_stable.html
- name: Install dependencies
run: |
python -V
export CFLAGS=`python -c 'import sysconfig;print("-I"+sysconfig.get_paths()["include"])'`
python -m pip install openmim
- python -m pip install -r requirements.txt -r requirements/backends.txt
+ python -m pip install -r requirements.txt
+ python -m pip install -r requirements/backends.txt
python -m mim install "mmcv>=2.0.0rc1"
CFLAGS=$CFLAGS python -m mim install -r requirements/codebases.txt
- python -m pip install -U pycuda numpy clip
+ python -m pip install -U pycuda numpy clip numba
python -m pip list
- name: Build and install
run: |
@@ -163,43 +161,40 @@ jobs:
coverage xml
coverage report -m
- build_cuda111:
- runs-on: ubuntu-18.04
+ build_cuda113:
+ runs-on: ubuntu-20.04
container:
- image: pytorch/pytorch:1.8.0-cuda11.1-cudnn8-devel
-
+ image: pytorch/pytorch:1.10.0-cuda11.3-cudnn8-devel
strategy:
matrix:
- python-version: [3.7]
- torch: [1.8.0+cu111]
+ torch: [1.10.0+cu113]
include:
- - torch: 1.8.0+cu111
- torch_version: torch1.8
- torchvision: 0.9.0+cu111
-
+ - torch: 1.10.0+cu113
+ torch_version: torch1.10
+ torchvision: 0.11.0+cu113
steps:
- uses: actions/checkout@v2
- - name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v2
- with:
- python-version: ${{ matrix.python-version }}
- name: Install system dependencies
run: |
apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A4B469963BF863CC
- apt-get update && apt-get install -y ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libxrender-dev python${{matrix.python-version}}-dev
+ apt-get update && apt-get install -y ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libxrender-dev
apt-get clean
rm -rf /var/lib/apt/lists/*
- name: Install PyTorch
- run: python -m pip install torch==${{matrix.torch}} torchvision==${{matrix.torchvision}} -f https://download.pytorch.org/whl/torch_stable.html
+ run: |
+ python -V
+ python -m pip install --upgrade pip
+ python -m pip install torch==${{matrix.torch}} torchvision==${{matrix.torchvision}} -f https://download.pytorch.org/whl/torch_stable.html
- name: Install dependencies
run: |
python -V
export CFLAGS=`python -c 'import sysconfig;print("-I"+sysconfig.get_paths()["include"])'`
python -m pip install openmim
- python -m pip install -r requirements.txt -r requirements/backends.txt
+ python -m pip install -r requirements.txt
+ python -m pip install -r requirements/backends.txt
python -m mim install "mmcv>=2.0.0rc1"
python -m mim install -r requirements/codebases.txt
- python -m pip install -U pycuda numpy clip
+ python -m pip install -U pycuda numpy clip numba
python -m pip list
- name: Build and install
run: |
@@ -285,7 +280,8 @@ jobs:
conda activate $pwd\tmp_env
python -V
python -m pip install openmim
- python -m pip install -r requirements.txt -r requirements/backends.txt
+ python -m pip install -r requirements.txt
+ python -m pip install -r requirements/backends.txt
python -m mim install "mmcls>=1.0.0rc2"
python -m pip list
- name: Build mmdeploy
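The CUDA jobs in this workflow export `CFLAGS` computed from `sysconfig` so that source builds of the codebases compile against the container interpreter's headers. What that one-liner evaluates to:

```python
# The `export CFLAGS=...` line computes the C header directory of the
# running interpreter and hands it to the compiler as an include path,
# so extension builds of the codebases can find Python.h.
import sysconfig

print("-I" + sysconfig.get_paths()["include"])  # e.g. -I/opt/conda/include/python3.7m
```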
diff --git a/.github/workflows/java_api.yml b/.github/workflows/java_api.yml
index 984e27c0f8..8c7f0544c5 100644
--- a/.github/workflows/java_api.yml
+++ b/.github/workflows/java_api.yml
@@ -16,7 +16,7 @@ concurrency:
jobs:
test_java_api:
- runs-on: ubuntu-18.04
+ runs-on: ubuntu-20.04
steps:
- name: Checkout repository
uses: actions/checkout@v3
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index c3045171ef..0b05c42a64 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -4,7 +4,7 @@ on: [push, pull_request]
jobs:
lint:
- runs-on: ubuntu-18.04
+ runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v2
- name: Set up Python 3.7
diff --git a/.github/workflows/prebuild.yml b/.github/workflows/prebuild.yml
index d1b068134c..6e7f6de4ba 100644
--- a/.github/workflows/prebuild.yml
+++ b/.github/workflows/prebuild.yml
@@ -4,7 +4,6 @@ on:
push:
branches:
- main
- - dev-1.x
paths:
- "mmdeploy/version.py"
diff --git a/.github/workflows/quantize.yml b/.github/workflows/quantize.yml
index 361b5cdce3..c1f4625be5 100644
--- a/.github/workflows/quantize.yml
+++ b/.github/workflows/quantize.yml
@@ -18,29 +18,22 @@ concurrency:
jobs:
test_ncnn_PTQ:
- runs-on: ubuntu-18.04
+ runs-on: ubuntu-20.04
container:
image: pytorch/pytorch:1.8.0-cuda11.1-cudnn8-devel
-
strategy:
matrix:
- python-version: [3.7]
torch: [1.8.0+cu111]
include:
- torch: 1.8.0+cu111
torch_version: torch1.8
torchvision: 0.9.0+cu111
-
steps:
- uses: actions/checkout@v2
- - name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v2
- with:
- python-version: ${{ matrix.python-version }}
- name: Install system dependencies
run: |
apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A4B469963BF863CC
- apt-get update && apt-get install -y wget ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libxrender-dev python${{matrix.python-version}}-dev
+ apt-get update && apt-get install -y wget ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libxrender-dev
apt-get clean
rm -rf /var/lib/apt/lists/*
- name: Install PyTorch
@@ -48,7 +41,7 @@ jobs:
- name: Install dependencies
run: |
python -V
-
+ python -m pip install --upgrade pip
python -m pip install -r requirements.txt
python -m pip install -U numpy
diff --git a/.github/workflows/rust_api.yml b/.github/workflows/rust_api.yml
index a5ed43fd7f..24cd6e9e82 100644
--- a/.github/workflows/rust_api.yml
+++ b/.github/workflows/rust_api.yml
@@ -16,7 +16,7 @@ concurrency:
jobs:
test_rust_api:
- runs-on: ubuntu-18.04
+ runs-on: ubuntu-20.04
steps:
- name: Checkout repository
uses: actions/checkout@v3
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index dd2359b1f6..ce3dd5287c 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -31,7 +31,7 @@ repos:
rev: 0.7.9
hooks:
- id: mdformat
- args: ["--number"]
+ args: ["--number", "--disable-escape", "link-enclosure"]
additional_dependencies:
- mdformat-openmmlab
- mdformat_frontmatter
diff --git a/README.md b/README.md
index 263c266903..07704eec9d 100644
--- a/README.md
+++ b/README.md
@@ -18,10 +18,10 @@
-[![docs](https://img.shields.io/badge/docs-latest-blue)](https://mmdeploy.readthedocs.io/en/1.x/)
+[![docs](https://img.shields.io/badge/docs-latest-blue)](https://mmdeploy.readthedocs.io/en/latest/)
[![badge](https://github.com/open-mmlab/mmdeploy/workflows/build/badge.svg)](https://github.com/open-mmlab/mmdeploy/actions)
-[![codecov](https://codecov.io/gh/open-mmlab/mmdeploy/branch/1.x/graph/badge.svg)](https://codecov.io/gh/open-mmlab/mmdeploy)
-[![license](https://img.shields.io/github/license/open-mmlab/mmdeploy.svg)](https://github.com/open-mmlab/mmdeploy/tree/1.x/LICENSE)
+[![codecov](https://codecov.io/gh/open-mmlab/mmdeploy/branch/main/graph/badge.svg)](https://codecov.io/gh/open-mmlab/mmdeploy)
+[![license](https://img.shields.io/github/license/open-mmlab/mmdeploy.svg)](https://github.com/open-mmlab/mmdeploy/tree/main/LICENSE)
[![issue resolution](https://img.shields.io/github/issues-closed-raw/open-mmlab/mmdeploy)](https://github.com/open-mmlab/mmdeploy/issues)
[![open issues](https://img.shields.io/github/issues-raw/open-mmlab/mmdeploy)](https://github.com/open-mmlab/mmdeploy/issues)
@@ -43,6 +43,16 @@ English | [简体中文](README_zh-CN.md)
+## Highlights
+
+MMDeploy 1.x has been released; it is adapted to the upstream OpenMMLab 2.0 codebases. Please **align the versions** when using it.
+The default branch has been switched from `master` to `main`. MMDeploy 0.x (`master`) will be deprecated, and new features will only be added to MMDeploy 1.x (`main`) in the future.
+
+| mmdeploy | mmengine | mmcv | mmdet | others |
+| :------: | :------: | :------: | :------: | :----: |
+| 0.x.y | - | \<=1.x.y | \<=2.x.y | 0.x.y |
+| 1.x.y | 0.x.y | 2.x.y | 3.x.y | 1.x.y |
+
## Introduction
MMDeploy is an open-source deep learning model deployment toolset. It is a part of the [OpenMMLab](https://openmmlab.com/) project.
@@ -73,24 +83,24 @@ The supported Device-Platform-InferenceBackend matrix is presented as following,
The benchmark can be found from [here](docs/en/03-benchmark/benchmark.md)
-| Device / Platform | Linux | Windows | macOS | Android |
-| ----------------- | ------------------------------------------------------------------------ | --------------------------------------- | -------- | ---------------- |
-| x86_64 CPU | ✔️ONNX Runtime<br>✔️pplnn<br>✔️ncnn<br>✔️OpenVINO<br>✔️LibTorch<br>✔️TVM | ✔️ONNX Runtime<br>✔️OpenVINO | - | - |
-| ARM CPU | ✔️ncnn | - | - | ✔️ncnn |
-| RISC-V | ✔️ncnn | - | - | - |
-| NVIDIA GPU | ✔️ONNX Runtime<br>✔️TensorRT<br>✔️pplnn<br>✔️LibTorch<br>✔️TVM | ✔️ONNX Runtime<br>✔️TensorRT<br>✔️pplnn | - | - |
-| NVIDIA Jetson | ✔️TensorRT | ✔️TensorRT | - | - |
-| Huawei ascend310 | ✔️CANN | - | - | - |
-| Rockchip | ✔️RKNN | - | - | - |
-| Apple M1 | - | - | ✔️CoreML | - |
-| Adreno GPU | - | - | - | ✔️ncnn<br>✔️SNPE |
-| Hexagon DSP | - | - | - | ✔️SNPE |
+| Device / Platform | Linux | Windows | macOS | Android |
+| ----------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| x86_64 CPU | [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-ort.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-ort.yml)ONNXRuntime<br>[![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-pplnn.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-pplnn.yml)pplnn<br>[![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-ncnn.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-ncnn.yml)ncnn<br>[![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-torchscript.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-torchscript.yml)LibTorch<br>![](https://img.shields.io/badge/build-no%20status-lightgrey)OpenVINO<br>![](https://img.shields.io/badge/build-no%20status-lightgrey)TVM<br> | [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/build.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/build.yml)ONNXRuntime<br>![](https://img.shields.io/badge/build-no%20status-lightgrey)OpenVINO<br>[![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/build.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/build.yml)ncnn<br> | - | - |
+| ARM CPU | [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/build.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/build.yml)ncnn<br> | - | - | [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-rknn.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-rknn.yml)ncnn<br> |
+| RISC-V | [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/linux-riscv64-gcc.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/linux-riscv64-gcc.yml)ncnn<br> | - | - | - |
+| NVIDIA GPU | [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/build.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/build.yml)ONNXRuntime<br>[![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/build.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/build.yml)TensorRT<br>![](https://img.shields.io/badge/build-no%20status-lightgrey)LibTorch<br>[![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-pplnn.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-pplnn.yml)pplnn<br> | [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/build.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/build.yml)ONNXRuntime<br>[![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/build.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/build.yml)TensorRT<br> | - | - |
+| NVIDIA Jetson | ![](https://img.shields.io/badge/build-no%20status-lightgrey)TensorRT<br> | - | - | - |
+| Huawei ascend310 | [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-ascend.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-ascend.yml)CANN<br> | - | - | - |
+| Rockchip | [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-rknn.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-rknn.yml)RKNN<br> | - | - | - |
+| Apple M1 | - | - | [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-coreml.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-coreml.yml)CoreML<br> | - |
+| Adreno GPU | - | - | - | [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-snpe.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-snpe.yml)SNPE<br>[![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-rknn.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-rknn.yml)ncnn<br> |
+| Hexagon DSP | - | - | - | [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-snpe.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-snpe.yml)SNPE<br> |
### Efficient and scalable C/C++ SDK Framework
All kinds of modules in the SDK can be extended, such as `Transform` for image processing, `Net` for neural network inference, and `Module` for postprocessing.
-## [Documentation](https://mmdeploy.readthedocs.io/en/1.x/)
+## [Documentation](https://mmdeploy.readthedocs.io/en/latest/)
Please read [getting_started](docs/en/get_started.md) for the basic usage of MMDeploy. We also provide tutorials about:
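The Highlights section added above asks users to align versions across packages. A minimal sketch of such a check (not an official tool; package names follow the table in the Highlights section):

```python
# Check the installed major versions against the MMDeploy 1.x row of the
# compatibility table: mmdeploy 1.x pairs with mmengine 0.x, mmcv 2.x,
# and mmdet 3.x.
from importlib.metadata import version

expected_major = {"mmdeploy": 1, "mmengine": 0, "mmcv": 2, "mmdet": 3}
for pkg, major in expected_major.items():
    v = version(pkg)
    assert int(v.split(".")[0]) == major, f"{pkg} {v} does not match the 1.x row"
print("installed versions match the MMDeploy 1.x row")
```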
diff --git a/README_zh-CN.md b/README_zh-CN.md
index 068ff89442..9889741cf7 100644
--- a/README_zh-CN.md
+++ b/README_zh-CN.md
@@ -19,15 +19,25 @@
-[![docs](https://img.shields.io/badge/docs-latest-blue)](https://mmdeploy.readthedocs.io/zh_CN/1.x/)
+[![docs](https://img.shields.io/badge/docs-latest-blue)](https://mmdeploy.readthedocs.io/zh_CN/latest/)
[![badge](https://github.com/open-mmlab/mmdeploy/workflows/build/badge.svg)](https://github.com/open-mmlab/mmdeploy/actions)
-[![codecov](https://codecov.io/gh/open-mmlab/mmdeploy/branch/1.x/graph/badge.svg)](https://codecov.io/gh/open-mmlab/mmdeploy)
-[![license](https://img.shields.io/github/license/open-mmlab/mmdeploy.svg)](https://github.com/open-mmlab/mmdeploy/tree/1.x/LICENSE)
+[![codecov](https://codecov.io/gh/open-mmlab/mmdeploy/branch/main/graph/badge.svg)](https://codecov.io/gh/open-mmlab/mmdeploy)
+[![license](https://img.shields.io/github/license/open-mmlab/mmdeploy.svg)](https://github.com/open-mmlab/mmdeploy/tree/main/LICENSE)
[![issue resolution](https://img.shields.io/github/issues-closed-raw/open-mmlab/mmdeploy)](https://github.com/open-mmlab/mmdeploy/issues)
[![open issues](https://img.shields.io/github/issues-raw/open-mmlab/mmdeploy)](https://github.com/open-mmlab/mmdeploy/issues)
[English](README.md) | Simplified Chinese
+## MMDeploy 1.x Release
+
+The brand-new MMDeploy 1.x has been released. It is adapted to the OpenMMLab 2.0 ecosystem, so please **align the versions** when using it.
+The default branch of the MMDeploy codebase has been switched from `master` to `main`. MMDeploy 0.x (`master`) will be gradually deprecated, and new features will only be added to MMDeploy 1.x (`main`).
+
+| mmdeploy | mmengine | mmcv | mmdet | mmcls and others |
+| :------: | :------: | :------: | :------: | :--------------: |
+| 0.x.y | - | \<=1.x.y | \<=2.x.y | 0.x.y |
+| 1.x.y | 0.x.y | 2.x.y | 3.x.y | 1.x.y |
+
## Introduction
MMDeploy is the [OpenMMLab](https://openmmlab.com/) model deployment toolbox, **providing a unified deployment experience for every algorithm library**. With MMDeploy, developers can easily generate the SDK required for the target hardware from a training repo, saving a great deal of adaptation time.
@@ -56,18 +66,18 @@ MMDeploy 是 [OpenMMLab](https://openmmlab.com/) 模型部署工具箱,**为
The supported device platforms and inference engines are listed in the table below. For benchmarks, please refer to [here](docs/zh_cn/03-benchmark/benchmark.md)
-| Device / Platform | Linux | Windows | macOS | Android |
-| ----------------- | ------------------------------------------------------------------------ | --------------------------------------- | -------- | ---------------- |
-| x86_64 CPU | ✔️ONNX Runtime<br>✔️pplnn<br>✔️ncnn<br>✔️OpenVINO<br>✔️LibTorch<br>✔️TVM | ✔️ONNX Runtime<br>✔️OpenVINO | - | - |
-| ARM CPU | ✔️ncnn | - | - | ✔️ncnn |
-| RISC-V | ✔️ncnn | - | - | - |
-| NVIDIA GPU | ✔️ONNX Runtime<br>✔️TensorRT<br>✔️pplnn<br>✔️LibTorch<br>✔️TVM | ✔️ONNX Runtime<br>✔️TensorRT<br>✔️pplnn | - | - |
-| NVIDIA Jetson | ✔️TensorRT | ✔️TensorRT | - | - |
-| Huawei ascend310 | ✔️CANN | - | - | - |
-| Rockchip | ✔️RKNN | - | - | - |
-| Apple M1 | - | - | ✔️CoreML | - |
-| Adreno GPU | - | - | - | ✔️ncnn<br>✔️SNPE |
-| Hexagon DSP | - | - | - | ✔️SNPE |
+| Device / Platform | Linux | Windows | macOS | Android |
+| ----------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| x86_64 CPU | [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-ort.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-ort.yml)ONNXRuntime<br>[![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-pplnn.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-pplnn.yml)pplnn<br>[![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-ncnn.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-ncnn.yml)ncnn<br>[![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-torchscript.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-torchscript.yml)LibTorch<br>![](https://img.shields.io/badge/build-no%20status-lightgrey)OpenVINO<br>![](https://img.shields.io/badge/build-no%20status-lightgrey)TVM<br> | [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/build.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/build.yml)ONNXRuntime<br>![](https://img.shields.io/badge/build-no%20status-lightgrey)OpenVINO<br>[![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/build.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/build.yml)ncnn<br> | - | - |
+| ARM CPU | [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/build.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/build.yml)ncnn<br> | - | - | [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-rknn.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-rknn.yml)ncnn<br> |
+| RISC-V | [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/linux-riscv64-gcc.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/linux-riscv64-gcc.yml)ncnn<br> | - | - | - |
+| NVIDIA GPU | [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/build.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/build.yml)ONNXRuntime<br>[![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/build.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/build.yml)TensorRT<br>![](https://img.shields.io/badge/build-no%20status-lightgrey)LibTorch<br>[![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-pplnn.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-pplnn.yml)pplnn<br> | [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/build.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/build.yml)ONNXRuntime<br>[![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/build.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/build.yml)TensorRT<br> | - | - |
+| NVIDIA Jetson | ![](https://img.shields.io/badge/build-no%20status-lightgrey)TensorRT<br> | - | - | - |
+| Huawei ascend310 | [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-ascend.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-ascend.yml)CANN<br> | - | - | - |
+| Rockchip | [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-rknn.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-rknn.yml)RKNN<br> | - | - | - |
+| Apple M1 | - | - | [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-coreml.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-coreml.yml)CoreML<br> | - |
+| Adreno GPU | - | - | - | [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-snpe.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-snpe.yml)SNPE<br>[![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-rknn.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-rknn.yml)ncnn<br> |
+| Hexagon DSP | - | - | - | [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-snpe.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-snpe.yml)SNPE<br> |
### Highly Customizable SDK
@@ -75,7 +85,7 @@ MMDeploy 是 [OpenMMLab](https://openmmlab.com/) 模型部署工具箱,**为
- Net inference
- Module postprocessing
-## [Chinese Documentation](https://mmdeploy.readthedocs.io/zh_CN/1.x/)
+## [Chinese Documentation](https://mmdeploy.readthedocs.io/zh_CN/latest/)
- [Get started](docs/zh_cn/get_started.md)
- [Build from source](docs/zh_cn/01-how-to-build/build_from_source.md)
@@ -119,7 +129,7 @@ MMDeploy 是 [OpenMMLab](https://openmmlab.com/) 模型部署工具箱,**为
## Benchmark and Model Zoo
-The benchmark and the list of supported models can be found in the [benchmark](https://mmdeploy.readthedocs.io/zh_CN/1.x/03-benchmark/benchmark.html) and the [model list](https://mmdeploy.readthedocs.io/en/1.x/03-benchmark/supported_models.html).
+The benchmark and the list of supported models can be found in the [benchmark](https://mmdeploy.readthedocs.io/zh_CN/latest/03-benchmark/benchmark.html) and the [model list](https://mmdeploy.readthedocs.io/en/latest/03-benchmark/supported_models.html).
## Contributing Guide
@@ -176,9 +186,9 @@ MMDeploy 是 [OpenMMLab](https://openmmlab.com/) 模型部署工具箱,**为
Scan the QR code below to follow the OpenMMLab team's [official Zhihu account](https://www.zhihu.com/people/openmmlab), join the OpenMMLab team's [official QQ group](https://jq.qq.com/?_wv=1027&k=MSMAfWOe), or add the WeChat assistant "OpenMMLabwx" to join the official WeChat group.
In the OpenMMLab community, we will provide everyone with
diff --git a/csrc/mmdeploy/apis/csharp/README.md b/csrc/mmdeploy/apis/csharp/README.md
index 16b345d8ff..241efdf601 100644
--- a/csrc/mmdeploy/apis/csharp/README.md
+++ b/csrc/mmdeploy/apis/csharp/README.md
@@ -33,10 +33,10 @@ There are two methods to build the nuget package.
(*option 1*) Use the command.
-If your environment is well prepared, you can just go to the `csrc\apis\csharp` folder, open a terminal and type the following command, the nupkg will be built in `csrc\apis\csharp\MMDeploy\bin\Release\MMDeployCSharp.1.0.0-rc2.nupkg`.
+If your environment is well prepared, you can just go to the `csrc\apis\csharp` folder, open a terminal and type the following command, the nupkg will be built in `csrc\apis\csharp\MMDeploy\bin\Release\MMDeployCSharp.1.0.0.nupkg`.
```shell
-dotnet build --configuration Release -p:Version=1.0.0-rc2
+dotnet build --configuration Release -p:Version=1.0.0
```
(*option 2*) Open MMDeploy.sln && Build.
diff --git a/demo/csharp/image_classification/image_classification.csproj b/demo/csharp/image_classification/image_classification.csproj
index de0494c844..a0e86ea43f 100644
--- a/demo/csharp/image_classification/image_classification.csproj
+++ b/demo/csharp/image_classification/image_classification.csproj
@@ -14,7 +14,7 @@
-    <PackageReference Include="MMDeployCSharp" Version="1.0.0-rc2" />
+    <PackageReference Include="MMDeployCSharp" Version="1.0.0" />
diff --git a/demo/csharp/image_restorer/image_restorer.csproj b/demo/csharp/image_restorer/image_restorer.csproj
index 15b887841a..0c23d8d4cf 100644
--- a/demo/csharp/image_restorer/image_restorer.csproj
+++ b/demo/csharp/image_restorer/image_restorer.csproj
@@ -14,7 +14,7 @@
-    <PackageReference Include="MMDeployCSharp" Version="1.0.0-rc2" />
+    <PackageReference Include="MMDeployCSharp" Version="1.0.0" />
diff --git a/demo/csharp/image_segmentation/image_segmentation.csproj b/demo/csharp/image_segmentation/image_segmentation.csproj
index 15b887841a..0c23d8d4cf 100644
--- a/demo/csharp/image_segmentation/image_segmentation.csproj
+++ b/demo/csharp/image_segmentation/image_segmentation.csproj
@@ -14,7 +14,7 @@
-    <PackageReference Include="MMDeployCSharp" Version="1.0.0-rc2" />
+    <PackageReference Include="MMDeployCSharp" Version="1.0.0" />
diff --git a/demo/csharp/object_detection/object_detection.csproj b/demo/csharp/object_detection/object_detection.csproj
index 259e635c94..f52ab3532c 100644
--- a/demo/csharp/object_detection/object_detection.csproj
+++ b/demo/csharp/object_detection/object_detection.csproj
@@ -14,7 +14,7 @@
-    <PackageReference Include="MMDeployCSharp" Version="1.0.0-rc2" />
+    <PackageReference Include="MMDeployCSharp" Version="1.0.0" />
diff --git a/demo/csharp/ocr_detection/ocr_detection.csproj b/demo/csharp/ocr_detection/ocr_detection.csproj
index 15b887841a..0c23d8d4cf 100644
--- a/demo/csharp/ocr_detection/ocr_detection.csproj
+++ b/demo/csharp/ocr_detection/ocr_detection.csproj
@@ -14,7 +14,7 @@
-    <PackageReference Include="MMDeployCSharp" Version="1.0.0-rc2" />
+    <PackageReference Include="MMDeployCSharp" Version="1.0.0" />
diff --git a/demo/csharp/ocr_recognition/ocr_recognition.csproj b/demo/csharp/ocr_recognition/ocr_recognition.csproj
index 15b887841a..0c23d8d4cf 100644
--- a/demo/csharp/ocr_recognition/ocr_recognition.csproj
+++ b/demo/csharp/ocr_recognition/ocr_recognition.csproj
@@ -14,7 +14,7 @@
-    <PackageReference Include="MMDeployCSharp" Version="1.0.0-rc2" />
+    <PackageReference Include="MMDeployCSharp" Version="1.0.0" />
diff --git a/demo/csharp/pose_detection/pose_detection.csproj b/demo/csharp/pose_detection/pose_detection.csproj
index 15b887841a..0c23d8d4cf 100644
--- a/demo/csharp/pose_detection/pose_detection.csproj
+++ b/demo/csharp/pose_detection/pose_detection.csproj
@@ -14,7 +14,7 @@
-    <PackageReference Include="MMDeployCSharp" Version="1.0.0-rc2" />
+    <PackageReference Include="MMDeployCSharp" Version="1.0.0" />
diff --git a/demo/tutorials/tutorials_1.ipynb b/demo/tutorials/tutorials_1.ipynb
index c5644ae00e..bfa561e024 100755
--- a/demo/tutorials/tutorials_1.ipynb
+++ b/demo/tutorials/tutorials_1.ipynb
@@ -6,7 +6,7 @@
"id": "mAWHDEbr6Q2i"
},
"source": [
- "[![Open in colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/open-mmlab/mmdeploy/tree/1.x/demo/tutorials_1.ipynb)\n",
+ "[![Open in colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/open-mmlab/mmdeploy/tree/main/demo/tutorials_1.ipynb)\n",
"# 前言\n",
"OpenMMLab 的算法如何部署?是很多社区用户的困惑。而模型部署工具箱 [MMDeploy](https://zhuanlan.zhihu.com/p/450342651) 的开源,强势打通了从算法模型到应用程序这 \"最后一公里\"!\n",
"今天我们将开启模型部署入门系列教程,在模型部署开源库 MMDeploy 的辅助下,介绍以下内容:\n",
diff --git a/docker/CPU/Dockerfile b/docker/CPU/Dockerfile
index 87a068fd9e..7cf6d36f43 100644
--- a/docker/CPU/Dockerfile
+++ b/docker/CPU/Dockerfile
@@ -85,9 +85,9 @@ ENV PATH="/root/workspace/ncnn/build/tools/quantize/:${PATH}"
### install mmdeploy
WORKDIR /root/workspace
ARG VERSION
-RUN git clone -b 1.x https://github.com/open-mmlab/mmdeploy.git &&\
+RUN git clone -b main https://github.com/open-mmlab/mmdeploy.git &&\
cd mmdeploy &&\
- if [ -z ${VERSION} ] ; then echo "No MMDeploy version passed in, building on 1.x" ; else git checkout tags/v${VERSION} -b tag_v${VERSION} ; fi &&\
+ if [ -z ${VERSION} ] ; then echo "No MMDeploy version passed in, building on main" ; else git checkout tags/v${VERSION} -b tag_v${VERSION} ; fi &&\
git submodule update --init --recursive &&\
rm -rf build &&\
mkdir build &&\
@@ -114,4 +114,4 @@ RUN cd mmdeploy && rm -rf build/CM* && mkdir -p build && cd build && cmake .. \
-DMMDEPLOY_CODEBASES=all &&\
cmake --build . -- -j$(nproc) && cmake --install . &&\
export SPDLOG_LEVEL=warn &&\
- if [ -z ${VERSION} ] ; then echo "Built MMDeploy 1.x for CPU devices successfully!" ; else echo "Built MMDeploy version v${VERSION} for CPU devices successfully!" ; fi
+ if [ -z ${VERSION} ] ; then echo "Built MMDeploy main for CPU devices successfully!" ; else echo "Built MMDeploy version v${VERSION} for CPU devices successfully!" ; fi
diff --git a/docker/GPU/Dockerfile b/docker/GPU/Dockerfile
index e39b7d71bf..e552fd44ad 100644
--- a/docker/GPU/Dockerfile
+++ b/docker/GPU/Dockerfile
@@ -65,9 +65,9 @@ RUN cp -r /usr/local/lib/python${PYTHON_VERSION}/dist-packages/tensorrt* /opt/co
ENV ONNXRUNTIME_DIR=/root/workspace/onnxruntime-linux-x64-${ONNXRUNTIME_VERSION}
ENV TENSORRT_DIR=/workspace/tensorrt
ARG VERSION
-RUN git clone -b 1.x https://github.com/open-mmlab/mmdeploy &&\
+RUN git clone -b main https://github.com/open-mmlab/mmdeploy &&\
cd mmdeploy &&\
- if [ -z ${VERSION} ] ; then echo "No MMDeploy version passed in, building on 1.x" ; else git checkout tags/v${VERSION} -b tag_v${VERSION} ; fi &&\
+ if [ -z ${VERSION} ] ; then echo "No MMDeploy version passed in, building on main" ; else git checkout tags/v${VERSION} -b tag_v${VERSION} ; fi &&\
git submodule update --init --recursive &&\
mkdir -p build &&\
cd build &&\
@@ -101,6 +101,6 @@ RUN cd /root/workspace/mmdeploy &&\
-DMMDEPLOY_CODEBASES=all &&\
make -j$(nproc) && make install &&\
export SPDLOG_LEVEL=warn &&\
- if [ -z ${VERSION} ] ; then echo "Built MMDeploy dev-1.x for GPU devices successfully!" ; else echo "Built MMDeploy version v${VERSION} for GPU devices successfully!" ; fi
+ if [ -z ${VERSION} ] ; then echo "Built MMDeploy for GPU devices successfully!" ; else echo "Built MMDeploy version v${VERSION} for GPU devices successfully!" ; fi
ENV LD_LIBRARY_PATH="/root/workspace/mmdeploy/build/lib:${BACKUP_LD_LIBRARY_PATH}"
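Both Dockerfiles branch on the optional `VERSION` build argument: without it they build from `main`, with it they check out `tags/v${VERSION}`. A hypothetical wrapper showing the two invocations (image tags here are illustrative):

```python
# Build the GPU image either from the main branch or from a release tag,
# driving the ARG VERSION switch in the Dockerfile above.
import subprocess

def build_image(path, tag, version=None):
    cmd = ["docker", "build", path, "-t", tag]
    if version is not None:
        cmd += ["--build-arg", f"VERSION={version}"]
    subprocess.run(cmd, check=True)

build_image("docker/GPU", "mmdeploy:main-gpu")             # builds from main
build_image("docker/GPU", "mmdeploy:v1.0.0-gpu", "1.0.0")  # builds tags/v1.0.0
```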
diff --git a/docs/en/01-how-to-build/android.md b/docs/en/01-how-to-build/android.md
index a153c34567..ab1081bbc6 100644
--- a/docs/en/01-how-to-build/android.md
+++ b/docs/en/01-how-to-build/android.md
@@ -97,7 +97,7 @@ make -j$(nproc) install
OpenJDK |
It is necessary for building Java API.
- See Java API build for building tutorials.
+ See Java API build for building tutorials.
|
diff --git a/docs/en/01-how-to-build/build_from_docker.md b/docs/en/01-how-to-build/build_from_docker.md
index 816c2c77b1..22d9acbab7 100644
--- a/docs/en/01-how-to-build/build_from_docker.md
+++ b/docs/en/01-how-to-build/build_from_docker.md
@@ -51,7 +51,7 @@ docker run --gpus all -it mmdeploy:master-gpu
As described [here](https://forums.developer.nvidia.com/t/cuda-error-the-provided-ptx-was-compiled-with-an-unsupported-toolchain/185754), update the GPU driver to the latest one for your GPU.
-2. docker: Error response from daemon: could not select device driver "" with capabilities: \[gpu\].
+2. docker: Error response from daemon: could not select device driver "" with capabilities: [gpu].
```
# Add the package repositories
diff --git a/docs/en/01-how-to-build/build_from_source.md b/docs/en/01-how-to-build/build_from_source.md
index 6fa742c7dd..2c272e3db7 100644
--- a/docs/en/01-how-to-build/build_from_source.md
+++ b/docs/en/01-how-to-build/build_from_source.md
@@ -3,7 +3,7 @@
## Download
```shell
-git clone -b 1.x git@github.com:open-mmlab/mmdeploy.git --recursive
+git clone -b main git@github.com:open-mmlab/mmdeploy.git --recursive
```
Note:
@@ -26,7 +26,7 @@ Note:
- If it fails when `git clone` via `SSH`, you can try the `HTTPS` protocol like this:
```shell
- git clone -b 1.x https://github.com/open-mmlab/mmdeploy.git --recursive
+ git clone -b main https://github.com/open-mmlab/mmdeploy.git --recursive
```
## Build
diff --git a/docs/en/01-how-to-build/jetsons.md b/docs/en/01-how-to-build/jetsons.md
index 03ce9db7b2..01a5b049e0 100644
--- a/docs/en/01-how-to-build/jetsons.md
+++ b/docs/en/01-how-to-build/jetsons.md
@@ -237,7 +237,7 @@ It takes about 15 minutes to install ppl.cv on a Jetson Nano. So, please be pati
## Install MMDeploy
```shell
-git clone -b 1.x --recursive https://github.com/open-mmlab/mmdeploy.git
+git clone -b main --recursive https://github.com/open-mmlab/mmdeploy.git
cd mmdeploy
export MMDEPLOY_DIR=$(pwd)
```
@@ -305,7 +305,7 @@ pip install -v -e . # or "python setup.py develop"
2. Follow [this document](../02-how-to-run/convert_model.md) on how to convert model files.
-For this example, we have used [retinanet_r18_fpn_1x_coco.py](https://github.com/open-mmlab/mmdetection/blob/3.x/configs/retinanet/retinanet_r18_fpn_1x_coco.py) as the model config, and [this file](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r18_fpn_1x_coco/retinanet_r18_fpn_1x_coco_20220407_171055-614fd399.pth) as the corresponding checkpoint file. Also for deploy config, we have used [detection_tensorrt_dynamic-320x320-1344x1344.py](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmdet/detection/detection_tensorrt_dynamic-320x320-1344x1344.py)
+For this example, we have used [retinanet_r18_fpn_1x_coco.py](https://github.com/open-mmlab/mmdetection/blob/3.x/configs/retinanet/retinanet_r18_fpn_1x_coco.py) as the model config, and [this file](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r18_fpn_1x_coco/retinanet_r18_fpn_1x_coco_20220407_171055-614fd399.pth) as the corresponding checkpoint file. Also for deploy config, we have used [detection_tensorrt_dynamic-320x320-1344x1344.py](https://github.com/open-mmlab/mmdeploy/tree/main/configs/mmdet/detection/detection_tensorrt_dynamic-320x320-1344x1344.py)
```shell
python ./tools/deploy.py \
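The `tools/deploy.py` invocation above is truncated by the hunk; conversions can also be driven from Python. A hedged sketch, assuming the `mmdeploy.apis.torch2onnx` entry point and the config/checkpoint named above (the TensorRT engine build that `tools/deploy.py` performs afterwards is not shown):

```python
# Export the RetinaNet model referenced above to ONNX via the Python API.
from mmdeploy.apis import torch2onnx

torch2onnx(
    img="demo/resources/det.jpg",  # any sample image for tracing
    work_dir="work_dir",
    save_file="retinanet.onnx",
    deploy_cfg="configs/mmdet/detection/detection_tensorrt_dynamic-320x320-1344x1344.py",
    model_cfg="retinanet_r18_fpn_1x_coco.py",
    model_checkpoint="retinanet_r18_fpn_1x_coco_20220407_171055-614fd399.pth",
    device="cuda:0",
)
```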
diff --git a/docs/en/01-how-to-build/rockchip.md b/docs/en/01-how-to-build/rockchip.md
index 3ecb645490..431b5d346d 100644
--- a/docs/en/01-how-to-build/rockchip.md
+++ b/docs/en/01-how-to-build/rockchip.md
@@ -140,7 +140,7 @@ label: 65, score: 0.95
- MMDet models.
- YOLOV3 & YOLOX: you may paste the following partition configuration into [detection_rknn_static-320x320.py](https://github.com/open-mmlab/mmdeploy/blob/1.x/configs/mmdet/detection/detection_rknn-int8_static-320x320.py):
+ YOLOV3 & YOLOX: you may paste the following partition configuration into [detection_rknn_static-320x320.py](https://github.com/open-mmlab/mmdeploy/blob/main/configs/mmdet/detection/detection_rknn-int8_static-320x320.py):
```python
# yolov3, yolox for rknn-toolkit and rknn-toolkit2
@@ -156,7 +156,7 @@ label: 65, score: 0.95
])
```
- RTMDet: you may paste the following partition configuration into [detection_rknn-int8_static-640x640.py](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/configs/mmdet/detection/detection_rknn-int8_static-640x640.py):
+ RTMDet: you may paste the following partition configuration into [detection_rknn-int8_static-640x640.py](https://github.com/open-mmlab/mmdeploy/blob/main/configs/mmdet/detection/detection_rknn-int8_static-640x640.py):
```python
# rtmdet for rknn-toolkit and rknn-toolkit2
@@ -172,7 +172,7 @@ label: 65, score: 0.95
])
```
- RetinaNet & SSD & FSAF with rknn-toolkit2, you may paste the following partition configuration into [detection_rknn_static-320x320.py](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/configs/mmdet/detection/detection_rknn-int8_static-320x320.py). Users with rknn-toolkit can directly use default config.
+ RetinaNet & SSD & FSAF with rknn-toolkit2, you may paste the following partition configuration into [detection_rknn_static-320x320.py](https://github.com/open-mmlab/mmdeploy/blob/main/configs/mmdet/detection/detection_rknn-int8_static-320x320.py). Users with rknn-toolkit can directly use default config.
```python
# retinanet, ssd for rknn-toolkit2
diff --git a/docs/en/02-how-to-run/prebuilt_package_windows.md b/docs/en/02-how-to-run/prebuilt_package_windows.md
index 6952ad4b7b..5cb513f806 100644
--- a/docs/en/02-how-to-run/prebuilt_package_windows.md
+++ b/docs/en/02-how-to-run/prebuilt_package_windows.md
@@ -21,7 +21,7 @@
______________________________________________________________________
-This tutorial takes `mmdeploy-1.0.0rc3-windows-amd64.zip` and `mmdeploy-1.0.0rc3-windows-amd64-cuda11.3.zip` as examples to show how to use the prebuilt packages. The former support onnxruntime cpu inference, the latter support onnxruntime-gpu and tensorrt inference.
+This tutorial takes `mmdeploy-1.0.0-windows-amd64.zip` and `mmdeploy-1.0.0-windows-amd64-cuda11.3.zip` as examples to show how to use the prebuilt packages. The former supports onnxruntime cpu inference; the latter supports onnxruntime-gpu and tensorrt inference.
The directory structure of the prebuilt package is as follows, where the `dist` folder contains the model converter and the `sdk` folder contains everything related to model inference.
@@ -48,7 +48,7 @@ In order to use the prebuilt package, you need to install some third-party depen
2. Clone the mmdeploy repository
```bash
- git clone -b 1.x https://github.com/open-mmlab/mmdeploy.git
+ git clone -b main https://github.com/open-mmlab/mmdeploy.git
```
:point_right: The main purpose here is to use the configs, so there is no need to compile `mmdeploy`.
@@ -56,7 +56,7 @@ In order to use the prebuilt package, you need to install some third-party depen
3. Install mmclassification
```bash
- git clone -b 1.x https://github.com/open-mmlab/mmclassification.git
+ git clone -b main https://github.com/open-mmlab/mmclassification.git
cd mmclassification
pip install -e .
```
@@ -81,8 +81,8 @@ In order to use `ONNX Runtime` backend, you should also do the following steps.
5. Install `mmdeploy` (Model Converter) and `mmdeploy_runtime` (SDK Python API).
```bash
- pip install mmdeploy==1.0.0rc3
- pip install mmdeploy-runtime==1.0.0rc3
+ pip install mmdeploy==1.0.0
+ pip install mmdeploy-runtime==1.0.0
```
:point_right: If you have installed it before, please uninstall it first.
@@ -100,7 +100,7 @@ In order to use `ONNX Runtime` backend, you should also do the following steps.
![sys-path](https://user-images.githubusercontent.com/16019484/181463801-1d7814a8-b256-46e9-86f2-c08de0bc150b.png)
:exclamation: Restart powershell to make the environment variables setting take effect. You can check whether the settings are in effect by `echo $env:PATH`.
-8. Download SDK C/cpp Library mmdeploy-1.0.0rc3-windows-amd64.zip
+8. Download SDK C/cpp Library mmdeploy-1.0.0-windows-amd64.zip
### TensorRT
@@ -109,8 +109,8 @@ In order to use `TensorRT` backend, you should also do the following steps.
5. Install `mmdeploy` (Model Converter) and `mmdeploy_runtime` (SDK Python API).
```bash
- pip install mmdeploy==1.0.0rc3
- pip install mmdeploy-runtime-gpu==1.0.0rc3
+ pip install mmdeploy==1.0.0
+ pip install mmdeploy-runtime-gpu==1.0.0
```
:point_right: If you have installed it before, please uninstall it first.
@@ -129,7 +129,7 @@ In order to use `TensorRT` backend, you should also do the following steps.
7. Install pycuda by `pip install pycuda`
-8. Download SDK C/cpp Library mmdeploy-1.0.0rc3-windows-amd64-cuda11.3.zip
+8. Download SDK C/cpp Library mmdeploy-1.0.0-windows-amd64-cuda11.3.zip
## Model Convert
@@ -141,7 +141,7 @@ After preparation work, the structure of the current working directory should be
```
..
-|-- mmdeploy-1.0.0rc3-windows-amd64
+|-- mmdeploy-1.0.0-windows-amd64
|-- mmclassification
|-- mmdeploy
`-- resnet18_8xb32_in1k_20210831-fbbb1da6.pth
@@ -189,7 +189,7 @@ After installation of mmdeploy-tensorrt prebuilt package, the structure of the c
```
..
-|-- mmdeploy-1.0.0rc3-windows-amd64-cuda11.3
+|-- mmdeploy-1.0.0-windows-amd64-cuda11.3
|-- mmclassification
|-- mmdeploy
`-- resnet18_8xb32_in1k_20210831-fbbb1da6.pth
@@ -252,8 +252,8 @@ The structure of current working directory:
```
.
-|-- mmdeploy-1.0.0rc3-windows-amd64-cuda11.1-tensorrt8.2.3.0
-|-- mmdeploy-1.0.0rc3-windows-amd64-onnxruntime1.8.1
+|-- mmdeploy-1.0.0-windows-amd64
+|-- mmdeploy-1.0.0-windows-amd64-cuda11.3
|-- mmclassification
|-- mmdeploy
|-- resnet18_8xb32_in1k_20210831-fbbb1da6.pth
@@ -324,7 +324,7 @@ The following describes how to use the SDK's C API for inference
It is recommended to use `CMD` here.
- Under `mmdeploy-1.0.0rc3-windows-amd64\\example\\cpp\\build\\Release` directory:
+ Under `mmdeploy-1.0.0-windows-amd64\\example\\cpp\\build\\Release` directory:
```
.\image_classification.exe cpu C:\workspace\work_dir\onnx\resnet\ C:\workspace\mmclassification\demo\demo.JPEG
@@ -344,7 +344,7 @@ The following describes how to use the SDK's C API for inference
It is recommended to use `CMD` here.
- Under `mmdeploy-1.0.0rc3-windows-amd64-cuda11.3\\example\\cpp\\build\\Release` directory
+ Under `mmdeploy-1.0.0-windows-amd64-cuda11.3\\example\\cpp\\build\\Release` directory
```
.\image_classification.exe cuda C:\workspace\work_dir\trt\resnet C:\workspace\mmclassification\demo\demo.JPEG
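The `mmdeploy-runtime` wheels installed above also expose the SDK Python API, mirroring the C demo. A short sketch, assuming the model and image paths used earlier in this tutorial:

```python
# Classify an image with the SDK Python API from the mmdeploy-runtime wheel.
import cv2
from mmdeploy_runtime import Classifier

img = cv2.imread(r"C:\workspace\mmclassification\demo\demo.JPEG")
classifier = Classifier(
    model_path=r"C:\workspace\work_dir\onnx\resnet", device_name="cpu", device_id=0
)
for label_id, score in classifier(img):
    print(label_id, score)
```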
diff --git a/docs/en/02-how-to-run/useful_tools.md b/docs/en/02-how-to-run/useful_tools.md
index ee69c51a2b..a402f2113d 100644
--- a/docs/en/02-how-to-run/useful_tools.md
+++ b/docs/en/02-how-to-run/useful_tools.md
@@ -83,7 +83,7 @@ python tools/onnx2pplnn.py \
- `onnx_path`: The path of the `ONNX` model to convert.
- `output_path`: The converted `PPLNN` algorithm path in json format.
- `device`: The device of the model during conversion.
-- `opt-shapes`: Optimal shapes for PPLNN optimization. The shape of each tensor should be wrap with "\[\]" or "()" and the shapes of tensors should be separated by ",".
+- `opt-shapes`: Optimal shapes for PPLNN optimization. The shape of each tensor should be wrapped with "[]" or "()" and the shapes of tensors should be separated by ",".
- `--log-level`: To set the log level, chosen from `'CRITICAL', 'FATAL', 'ERROR', 'WARN', 'WARNING', 'INFO', 'DEBUG', 'NOTSET'`. If not specified, it will be set to `INFO`.
## onnx2tensorrt
diff --git a/docs/en/03-benchmark/benchmark.md b/docs/en/03-benchmark/benchmark.md
index 18ef2faa3b..933b8519f4 100644
--- a/docs/en/03-benchmark/benchmark.md
+++ b/docs/en/03-benchmark/benchmark.md
@@ -2009,7 +2009,7 @@ Users can directly test the performance through [how_to_evaluate_a_model.md](../
fp32 |
- TSN |
+ TSN |
Recognition |
Kinetics-400 |
top-1 |
@@ -2030,7 +2030,7 @@ Users can directly test the performance through [how_to_evaluate_a_model.md](../
- |
- SlowFast |
+ SlowFast |
Recognition |
Kinetics-400 |
top-1 |
diff --git a/docs/en/04-supported-codebases/mmaction2.md b/docs/en/04-supported-codebases/mmaction2.md
index 2ccf7c1ccf..f33ffdfe4d 100644
--- a/docs/en/04-supported-codebases/mmaction2.md
+++ b/docs/en/04-supported-codebases/mmaction2.md
@@ -21,7 +21,7 @@ ______________________________________________________________________
### Install mmaction2
-Please follow the [installation guide](https://github.com/open-mmlab/mmaction2/tree/dev-1.x#installation) to install mmaction2.
+Please follow the [installation guide](https://github.com/open-mmlab/mmaction2/tree/1.x#installation) to install mmaction2.
### Install mmdeploy
@@ -29,7 +29,7 @@ There are several methods to install mmdeploy, among which you can choose an app
**Method I:** Install precompiled package
-You can download the latest release package from [here](https://github.com/open-mmlab/mmdeploy/releases)
+You can refer to [get_started](https://mmdeploy.readthedocs.io/en/latest/get_started.html#installation) for instructions on installing the precompiled package.
**Method II:** Build using scripts
@@ -37,7 +37,7 @@ If your target platform is **Ubuntu 18.04 or later version**, we encourage you t
[scripts](../01-how-to-build/build_from_script.md). For example, the following commands install mmdeploy as well as inference engine - `ONNX Runtime`.
```shell
-git clone --recursive -b 1.x https://github.com/open-mmlab/mmdeploy.git
+git clone --recursive -b main https://github.com/open-mmlab/mmdeploy.git
cd mmdeploy
python3 tools/scripts/build_ubuntu_x64_ort.py $(nproc)
export PYTHONPATH=$(pwd)/build/lib:$PYTHONPATH
@@ -50,9 +50,9 @@ If neither **I** nor **II** meets your requirements, [building mmdeploy from sou
## Convert model
-You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/tree/1.x/tools/deploy.py) to convert mmaction2 models to the specified backend models. Its detailed usage can be learned from [here](https://github.com/open-mmlab/mmdeploy/tree/1.x/docs/en/02-how-to-run/convert_model.md#usage).
+You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/tree/main/tools/deploy.py) to convert mmaction2 models to the specified backend models. Its detailed usage can be learned from [here](https://github.com/open-mmlab/mmdeploy/tree/main/docs/en/02-how-to-run/convert_model.md#usage).
-When using `tools/deploy.py`, it is crucial to specify the correct deployment config. We've already provided builtin deployment config [files](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmaction) of all supported backends for mmaction2, under which the config file path follows the pattern:
+When using `tools/deploy.py`, it is crucial to specify the correct deployment config. We've already provided builtin deployment config [files](https://github.com/open-mmlab/mmdeploy/tree/main/configs/mmaction) of all supported backends for mmaction2, under which the config file path follows the pattern:
```
{task}/{task}_{backend}-{precision}_{static | dynamic}_{shape}.py
@@ -178,13 +178,13 @@ for label_id, score in result:
print(label_id, score)
```
-Besides python API, mmdeploy SDK also provides other FFI (Foreign Function Interface), such as C, C++, C#, Java and so on. You can learn their usage from [demos](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo).
+Besides python API, mmdeploy SDK also provides other FFI (Foreign Function Interface), such as C, C++, C#, Java and so on. You can learn their usage from [demos](https://github.com/open-mmlab/mmdeploy/tree/main/demo).
> MMAction2 only has C, C++ and Python APIs for now.
## Supported models
-| Model | TorchScript | ONNX Runtime | TensorRT | ncnn | PPLNN | OpenVINO |
-| :-------------------------------------------------------------------------------------------- | :---------: | :----------: | :------: | :--: | :---: | :------: |
-| [TSN](https://github.com/open-mmlab/mmaction2/tree/dev-1.x/configs/recognition/tsn) | N | Y | Y | N | N | N |
-| [SlowFast](https://github.com/open-mmlab/mmaction2/tree/dev-1.x/configs/recognition/slowfast) | N | Y | Y | N | N | N |
+| Model | TorchScript | ONNX Runtime | TensorRT | ncnn | PPLNN | OpenVINO |
+| :---------------------------------------------------------------------------------------- | :---------: | :----------: | :------: | :--: | :---: | :------: |
+| [TSN](https://github.com/open-mmlab/mmaction2/tree/1.x/configs/recognition/tsn) | N | Y | Y | N | N | N |
+| [SlowFast](https://github.com/open-mmlab/mmaction2/tree/1.x/configs/recognition/slowfast) | N | Y | Y | N | N | N |
diff --git a/docs/en/04-supported-codebases/mmcls.md b/docs/en/04-supported-codebases/mmcls.md
index 1c39f6ba58..847ea33c57 100644
--- a/docs/en/04-supported-codebases/mmcls.md
+++ b/docs/en/04-supported-codebases/mmcls.md
@@ -27,7 +27,7 @@ There are several methods to install mmdeploy, among which you can choose an app
**Method I:** Install precompiled package
-> **TODO**. MMDeploy hasn't released based on 1.x branch.
+You can refer to [get_started](https://mmdeploy.readthedocs.io/en/latest/get_started.html#installation) for instructions on installing the precompiled package.
**Method II:** Build using scripts
@@ -35,7 +35,7 @@ If your target platform is **Ubuntu 18.04 or later version**, we encourage you t
[scripts](../01-how-to-build/build_from_script.md). For example, the following commands install mmdeploy as well as inference engine - `ONNX Runtime`.
```shell
-git clone --recursive -b 1.x https://github.com/open-mmlab/mmdeploy.git
+git clone --recursive -b main https://github.com/open-mmlab/mmdeploy.git
cd mmdeploy
python3 tools/scripts/build_ubuntu_x64_ort.py $(nproc)
export PYTHONPATH=$(pwd)/build/lib:$PYTHONPATH
@@ -48,7 +48,7 @@ If neither **I** nor **II** meets your requirements, [building mmdeploy from sou
## Convert model
-You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/tree/1.x/tools/deploy.py) to convert mmcls models to the specified backend models. Its detailed usage can be learned from [here](https://github.com/open-mmlab/mmdeploy/tree/1.x/docs/en/02-how-to-run/convert_model.md#usage).
+You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/tree/main/tools/deploy.py) to convert mmcls models to the specified backend models. Its detailed usage can be learned from [here](https://github.com/open-mmlab/mmdeploy/tree/main/docs/en/02-how-to-run/convert_model.md#usage).
The command below shows an example of converting the `resnet18` model to an ONNX model that can be inferred by ONNX Runtime.
@@ -70,7 +70,7 @@ python tools/deploy.py \
--dump-info
```
-It is crucial to specify the correct deployment config during model conversion. We've already provided builtin deployment config [files](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmcls) of all supported backends for mmclassification. The config filename pattern is:
+It is crucial to specify the correct deployment config during model conversion. We've already provided builtin deployment config [files](https://github.com/open-mmlab/mmdeploy/tree/main/configs/mmcls) of all supported backends for mmclassification. The config filename pattern is:
```
classification_{backend}-{precision}_{static | dynamic}_{shape}.py
@@ -81,7 +81,7 @@ classification_{backend}-{precision}_{static | dynamic}_{shape}.py
- **{static | dynamic}:** static shape or dynamic shape
- **{shape}:** input shape or shape range of a model
-Therefore, in the above example, you can also convert `resnet18` to other backend models by changing the deployment config file `classification_onnxruntime_dynamic.py` to [others](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmcls), e.g., converting to tensorrt-fp16 model by `classification_tensorrt-fp16_dynamic-224x224-224x224.py`.
+Therefore, in the above example, you can also convert `resnet18` to other backend models by changing the deployment config file `classification_onnxruntime_dynamic.py` to [others](https://github.com/open-mmlab/mmdeploy/tree/main/configs/mmcls), e.g., converting to tensorrt-fp16 model by `classification_tensorrt-fp16_dynamic-224x224-224x224.py`.
```{tip}
When converting mmcls models to tensorrt models, --device should be set to "cuda"
diff --git a/docs/en/04-supported-codebases/mmdet.md b/docs/en/04-supported-codebases/mmdet.md
index 92d822e2ea..5d43a77f48 100644
--- a/docs/en/04-supported-codebases/mmdet.md
+++ b/docs/en/04-supported-codebases/mmdet.md
@@ -27,7 +27,7 @@ There are several methods to install mmdeploy, among which you can choose an app
**Method I:** Install precompiled package
-> **TODO**. MMDeploy hasn't released based on 1.x branch.
+You can refer to [get_started](https://mmdeploy.readthedocs.io/en/latest/get_started.html#installation) for instructions on installing the precompiled package.
**Method II:** Build using scripts
@@ -35,7 +35,7 @@ If your target platform is **Ubuntu 18.04 or later version**, we encourage you t
[scripts](../01-how-to-build/build_from_script.md). For example, the following commands install mmdeploy as well as inference engine - `ONNX Runtime`.
```shell
-git clone --recursive -b 1.x https://github.com/open-mmlab/mmdeploy.git
+git clone --recursive -b main https://github.com/open-mmlab/mmdeploy.git
cd mmdeploy
python3 tools/scripts/build_ubuntu_x64_ort.py $(nproc)
export PYTHONPATH=$(pwd)/build/lib:$PYTHONPATH
@@ -48,7 +48,7 @@ If neither **I** nor **II** meets your requirements, [building mmdeploy from sou
## Convert model
-You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/tree/1.x/tools/deploy.py) to convert mmdet models to the specified backend models. Its detailed usage can be learned from [here](../02-how-to-run/convert_model.md).
+You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/tree/main/tools/deploy.py) to convert mmdet models to the specified backend models. Its detailed usage can be learned from [here](../02-how-to-run/convert_model.md).
The command below shows an example of converting the `Faster R-CNN` model to an ONNX model that can be inferred by ONNX Runtime.
@@ -68,7 +68,7 @@ python tools/deploy.py \
--dump-info
```
-It is crucial to specify the correct deployment config during model conversion. We've already provided builtin deployment config [files](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmdet) of all supported backends for mmdetection, under which the config file path follows the pattern:
+It is crucial to specify the correct deployment config during model conversion. We've already provided builtin deployment config [files](https://github.com/open-mmlab/mmdeploy/tree/main/configs/mmdet) of all supported backends for mmdetection, under which the config file path follows the pattern:
```
{task}/{task}_{backend}-{precision}_{static | dynamic}_{shape}.py
@@ -90,7 +90,7 @@ It is crucial to specify the correct deployment config during model conversion.
- **{shape}:** input shape or shape range of a model
-Therefore, in the above example, you can also convert `faster r-cnn` to other backend models by changing the deployment config file `detection_onnxruntime_dynamic.py` to [others](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmdet/detection), e.g., converting to tensorrt-fp16 model by `detection_tensorrt-fp16_dynamic-320x320-1344x1344.py`.
+Therefore, in the above example, you can also convert `faster r-cnn` to other backend models by changing the deployment config file `detection_onnxruntime_dynamic.py` to [others](https://github.com/open-mmlab/mmdeploy/tree/main/configs/mmdet/detection), e.g., converting to tensorrt-fp16 model by `detection_tensorrt-fp16_dynamic-320x320-1344x1344.py`.
```{tip}
When converting mmdet models to tensorrt models, --device should be set to "cuda"
@@ -185,7 +185,7 @@ for index, bbox, label_id in zip(indices, bboxes, labels):
cv2.imwrite('output_detection.png', img)
```
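For reference, a self-contained version of the detection snippet above might look like the following sketch; the model directory is an assumption and should be the `work_dir` passed to `tools/deploy.py`:

```python
# Self-contained sketch of the SDK Python detection example.
# `model_path` is assumed to be the work_dir produced by tools/deploy.py.
import cv2
from mmdeploy_runtime import Detector

img = cv2.imread('demo/demo.jpg')
detector = Detector(model_path='./mmdeploy_models/mmdet/ort',
                    device_name='cpu', device_id=0)
bboxes, labels, _ = detector(img)
for bbox, label_id in zip(bboxes, labels):
    x1, y1, x2, y2, score = bbox
    if score >= 0.3:
        cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0))
cv2.imwrite('output_detection.png', img)
```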
-Besides python API, mmdeploy SDK also provides other FFI (Foreign Function Interface), such as C, C++, C#, Java and so on. You can learn their usage from [demos](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo).
+Besides python API, mmdeploy SDK also provides other FFI (Foreign Function Interface), such as C, C++, C#, Java and so on. You can learn their usage from [demos](https://github.com/open-mmlab/mmdeploy/tree/main/demo).
## Supported models
diff --git a/docs/en/04-supported-codebases/mmedit.md b/docs/en/04-supported-codebases/mmedit.md
index f70ec726c2..298cbf03e3 100644
--- a/docs/en/04-supported-codebases/mmedit.md
+++ b/docs/en/04-supported-codebases/mmedit.md
@@ -28,7 +28,7 @@ There are several methods to install mmdeploy, among which you can choose an app
**Method I:** Install precompiled package
-> **TODO**. MMDeploy hasn't released based on 1.x branch.
+You can refer to [get_started](https://mmdeploy.readthedocs.io/en/latest/get_started.html#installation) for instructions on installing the precompiled package.
**Method II:** Build using scripts
@@ -36,7 +36,7 @@ If your target platform is **Ubuntu 18.04 or later version**, we encourage you t
[scripts](../01-how-to-build/build_from_script.md). For example, the following commands install mmdeploy as well as inference engine - `ONNX Runtime`.
```shell
-git clone --recursive -b 1.x https://github.com/open-mmlab/mmdeploy.git
+git clone --recursive -b main https://github.com/open-mmlab/mmdeploy.git
cd mmdeploy
python3 tools/scripts/build_ubuntu_x64_ort.py $(nproc)
export PYTHONPATH=$(pwd)/build/lib:$PYTHONPATH
@@ -49,9 +49,9 @@ If neither **I** nor **II** meets your requirements, [building mmdeploy from sou
## Convert model
-You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/tree/1.x/tools/deploy.py) to convert mmedit models to the specified backend models. Its detailed usage can be learned from [here](https://github.com/open-mmlab/mmdeploy/tree/1.x/docs/en/02-how-to-run/convert_model.md#usage).
+You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/tree/main/tools/deploy.py) to convert mmedit models to the specified backend models. Its detailed usage can be learned from [here](https://github.com/open-mmlab/mmdeploy/tree/main/docs/en/02-how-to-run/convert_model.md#usage).
-When using `tools/deploy.py`, it is crucial to specify the correct deployment config. We've already provided builtin deployment config [files](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmedit) of all supported backends for mmedit, under which the config file path follows the pattern:
+When using `tools/deploy.py`, it is crucial to specify the correct deployment config. We've already provided builtin deployment config [files](https://github.com/open-mmlab/mmdeploy/tree/main/configs/mmedit) of all supported backends for mmedit, under which the config file path follows the pattern:
```
{task}/{task}_{backend}-{precision}_{static | dynamic}_{shape}.py
@@ -91,7 +91,7 @@ python tools/deploy.py \
--dump-info
```
-You can also convert the above model to other backend models by changing the deployment config file `*_onnxruntime_dynamic.py` to [others](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmedit), e.g., converting to tensorrt model by `super-resolution/super-resolution_tensorrt-_dynamic-32x32-512x512.py`.
+You can also convert the above model to other backend models by changing the deployment config file `*_onnxruntime_dynamic.py` to [others](https://github.com/open-mmlab/mmdeploy/tree/main/configs/mmedit), e.g., converting to tensorrt model by `super-resolution/super-resolution_tensorrt-_dynamic-32x32-512x512.py`.
```{tip}
When converting mmedit models to tensorrt models, --device should be set to "cuda"
@@ -180,7 +180,7 @@ result = result[..., ::-1]
cv2.imwrite('output_restorer.bmp', result)
```
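A self-contained version of the restoration snippet above might look like this sketch; the model directory is an assumption:

```python
# Self-contained sketch of the SDK super-resolution example.
# `model_path` is assumed to be the work_dir produced by tools/deploy.py.
import cv2
from mmdeploy_runtime import Restorer

img = cv2.imread('demo/demo.png')
restorer = Restorer(model_path='./mmdeploy_models/mmedit/ort',
                    device_name='cpu', device_id=0)
result = restorer(img)      # restored image in RGB order
result = result[..., ::-1]  # convert to BGR for OpenCV
cv2.imwrite('output_restorer.bmp', result)
```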
-Besides python API, mmdeploy SDK also provides other FFI (Foreign Function Interface), such as C, C++, C#, Java and so on. You can learn their usage from [demos](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo).
+Besides python API, mmdeploy SDK also provides other FFI (Foreign Function Interface), such as C, C++, C#, Java and so on. You can learn their usage from [demos](https://github.com/open-mmlab/mmdeploy/tree/main/demo).
## Supported models
diff --git a/docs/en/04-supported-codebases/mmocr.md b/docs/en/04-supported-codebases/mmocr.md
index 444abc91f9..447aae7fee 100644
--- a/docs/en/04-supported-codebases/mmocr.md
+++ b/docs/en/04-supported-codebases/mmocr.md
@@ -24,7 +24,7 @@ ______________________________________________________________________
### Install mmocr
-Please follow the [installation guide](https://mmocr.readthedocs.io/en/dev-1.x/get_started/install.html) to install mmocr.
+Please follow the [installation guide](https://mmocr.readthedocs.io/en/latest/install.html) to install mmocr.
### Install mmdeploy
@@ -32,7 +32,7 @@ There are several methods to install mmdeploy, among which you can choose an app
**Method I:** Install precompiled package
-> **TODO**. MMDeploy hasn't released based on 1.x branch.
+You can refer to [get_started](https://mmdeploy.readthedocs.io/en/latest/get_started.html#installation) for instructions on installing the precompiled package.
**Method II:** Build using scripts
@@ -40,7 +40,7 @@ If your target platform is **Ubuntu 18.04 or later version**, we encourage you t
[scripts](../01-how-to-build/build_from_script.md). For example, the following commands install mmdeploy as well as inference engine - `ONNX Runtime`.
```shell
-git clone --recursive -b 1.x https://github.com/open-mmlab/mmdeploy.git
+git clone --recursive -b main https://github.com/open-mmlab/mmdeploy.git
cd mmdeploy
python3 tools/scripts/build_ubuntu_x64_ort.py $(nproc)
export PYTHONPATH=$(pwd)/build/lib:$PYTHONPATH
@@ -53,9 +53,9 @@ If neither **I** nor **II** meets your requirements, [building mmdeploy from sou
## Convert model
-You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/tree/1.x/tools/deploy.py) to convert mmocr models to the specified backend models. Its detailed usage can be learned from [here](https://github.com/open-mmlab/mmdeploy/tree/1.x/docs/en/02-how-to-run/convert_model.md#usage).
+You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/tree/main/tools/deploy.py) to convert mmocr models to the specified backend models. Its detailed usage can be learned from [here](https://github.com/open-mmlab/mmdeploy/tree/main/docs/en/02-how-to-run/convert_model.md#usage).
-When using `tools/deploy.py`, it is crucial to specify the correct deployment config. We've already provided builtin deployment config [files](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmocr) of all supported backends for mmocr, under which the config file path follows the pattern:
+When using `tools/deploy.py`, it is crucial to specify the correct deployment config. We've already provided builtin deployment config [files](https://github.com/open-mmlab/mmdeploy/tree/main/configs/mmocr) of all supported backends for mmocr, under which the config file path follows the pattern:
```
{task}/{task}_{backend}-{precision}_{static | dynamic}_{shape}.py
@@ -113,7 +113,7 @@ python tools/deploy.py \
--dump-info
```
-You can also convert the above models to other backend models by changing the deployment config file `*_onnxruntime_dynamic.py` to [others](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmocr), e.g., converting `dbnet` to tensorrt-fp32 model by `text-detection/text-detection_tensorrt-_dynamic-320x320-2240x2240.py`.
+You can also convert the above models to other backend models by changing the deployment config file `*_onnxruntime_dynamic.py` to [others](https://github.com/open-mmlab/mmdeploy/tree/main/configs/mmocr), e.g., converting `dbnet` to tensorrt-fp32 model by `text-detection/text-detection_tensorrt-_dynamic-320x320-2240x2240.py`.
```{tip}
When converting mmocr models to tensorrt models, --device should be set to "cuda"
@@ -230,7 +230,7 @@ texts = recognizer(img)
print(texts)
```
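For completeness, a self-contained version of the recognition snippet above might look like this sketch; the model directory and image path are assumptions:

```python
# Self-contained sketch of the SDK text recognition example.
# `model_path` is assumed to be the work_dir produced by tools/deploy.py.
import cv2
from mmdeploy_runtime import TextRecognizer

img = cv2.imread('demo/demo_text_recog.jpg')
recognizer = TextRecognizer(model_path='./mmdeploy_models/mmocr/ort',
                            device_name='cpu', device_id=0)
texts = recognizer(img)
print(texts)
```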
-Besides python API, mmdeploy SDK also provides other FFI (Foreign Function Interface), such as C, C++, C#, Java and so on. You can learn their usage from [demos](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo).
+Besides python API, mmdeploy SDK also provides other FFI (Foreign Function Interface), such as C, C++, C#, Java and so on. You can learn their usage from [demos](https://github.com/open-mmlab/mmdeploy/tree/main/demo).
## Supported models
diff --git a/docs/en/04-supported-codebases/mmpose.md b/docs/en/04-supported-codebases/mmpose.md
index 350e45d809..98fd292531 100644
--- a/docs/en/04-supported-codebases/mmpose.md
+++ b/docs/en/04-supported-codebases/mmpose.md
@@ -27,7 +27,7 @@ There are several methods to install mmdeploy, among which you can choose an app
**Method I:** Install precompiled package
-> **TODO**. MMDeploy hasn't released based on 1.x branch.
+You can refer to [get_started](https://mmdeploy.readthedocs.io/en/latest/get_started.html#installation) for instructions on installing the precompiled package.
**Method II:** Build using scripts
@@ -35,7 +35,7 @@ If your target platform is **Ubuntu 18.04 or later version**, we encourage you t
[scripts](../01-how-to-build/build_from_script.md). For example, the following commands install mmdeploy as well as inference engine - `ONNX Runtime`.
```shell
-git clone --recursive -b 1.x https://github.com/open-mmlab/mmdeploy.git
+git clone --recursive -b main https://github.com/open-mmlab/mmdeploy.git
cd mmdeploy
python3 tools/scripts/build_ubuntu_x64_ort.py $(nproc)
export PYTHONPATH=$(pwd)/build/lib:$PYTHONPATH
@@ -48,7 +48,7 @@ If neither **I** nor **II** meets your requirements, [building mmdeploy from sou
## Convert model
-You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/tree/1.x/tools/deploy.py) to convert mmpose models to the specified backend models. Its detailed usage can be learned from [here](https://github.com/open-mmlab/mmdeploy/tree/1.x/docs/en/02-how-to-run/convert_model.md#usage).
+You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/tree/main/tools/deploy.py) to convert mmpose models to the specified backend models. Its detailed usage can be learned from [here](https://github.com/open-mmlab/mmdeploy/tree/main/docs/en/02-how-to-run/convert_model.md#usage).
The command below shows an example of converting the `hrnet` model to an ONNX model that can be inferred by ONNX Runtime.
@@ -67,7 +67,7 @@ python tools/deploy.py \
--show
```
-It is crucial to specify the correct deployment config during model conversion. We've already provided builtin deployment config [files](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmpose) of all supported backends for mmpose. The config filename pattern is:
+It is crucial to specify the correct deployment config during model conversion. We've already provided builtin deployment config [files](https://github.com/open-mmlab/mmdeploy/tree/main/configs/mmpose) of all supported backends for mmpose. The config filename pattern is:
```
pose-detection_{backend}-{precision}_{static | dynamic}_{shape}.py
@@ -78,7 +78,7 @@ pose-detection_{backend}-{precision}_{static | dynamic}_{shape}.py
- **{static | dynamic}:** static shape or dynamic shape
- **{shape}:** input shape or shape range of a model
-Therefore, in the above example, you can also convert `hrnet` to other backend models by changing the deployment config file `pose-detection_onnxruntime_static.py` to [others](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmpose), e.g., converting to tensorrt model by `pose-detection_tensorrt_static-256x192.py`.
+Therefore, in the above example, you can also convert `hrnet` to other backend models by changing the deployment config file `pose-detection_onnxruntime_static.py` to [others](https://github.com/open-mmlab/mmdeploy/tree/main/configs/mmpose), e.g., converting to tensorrt model by `pose-detection_tensorrt_static-256x192.py`.
```{tip}
When converting mmpose models to tensorrt models, --device should be set to "cuda"
diff --git a/docs/en/04-supported-codebases/mmrotate.md b/docs/en/04-supported-codebases/mmrotate.md
index d228582910..712c727995 100644
--- a/docs/en/04-supported-codebases/mmrotate.md
+++ b/docs/en/04-supported-codebases/mmrotate.md
@@ -27,7 +27,7 @@ There are several methods to install mmdeploy, among which you can choose an app
**Method I:** Install precompiled package
-> **TODO**. MMDeploy hasn't released based on dev-1.x branch.
+You can refer to [get_started](https://mmdeploy.readthedocs.io/en/latest/get_started.html#installation) for instructions on installing the precompiled package.
**Method II:** Build using scripts
@@ -35,7 +35,7 @@ If your target platform is **Ubuntu 18.04 or later version**, we encourage you t
[scripts](../01-how-to-build/build_from_script.md). For example, the following commands install mmdeploy as well as inference engine - `ONNX Runtime`.
```shell
-git clone --recursive -b dev-1.x https://github.com/open-mmlab/mmdeploy.git
+git clone --recursive -b main https://github.com/open-mmlab/mmdeploy.git
cd mmdeploy
python3 tools/scripts/build_ubuntu_x64_ort.py $(nproc)
export PYTHONPATH=$(pwd)/build/lib:$PYTHONPATH
@@ -53,7 +53,7 @@ If neither **I** nor **II** meets your requirements, [building mmdeploy from sou
## Convert model
-You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/tools/deploy.py) to convert mmrotate models to the specified backend models. Its detailed usage can be learned from [here](https://github.com/open-mmlab/mmdeploy/blob/master/docs/en/02-how-to-run/convert_model.md#usage).
+You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/blob/main/tools/deploy.py) to convert mmrotate models to the specified backend models. Its detailed usage can be learned from [here](https://github.com/open-mmlab/mmdeploy/blob/main/docs/en/02-how-to-run/convert_model.md#usage).
The command below shows an example of converting the `rotated-faster-rcnn` model to an ONNX model that can be inferred by ONNX Runtime.
@@ -76,7 +76,7 @@ python tools/deploy.py \
--dump-info
```
-It is crucial to specify the correct deployment config during model conversion. We've already provided builtin deployment config [files](https://github.com/open-mmlab/mmdeploy/tree/dev-1.x/configs/mmrotate) of all supported backends for mmrotate. The config filename pattern is:
+It is crucial to specify the correct deployment config during model conversion. We've already provided builtin deployment config [files](https://github.com/open-mmlab/mmdeploy/tree/main/configs/mmrotate) of all supported backends for mmrotate. The config filename pattern is:
```
rotated_detection-{backend}-{precision}_{static | dynamic}_{shape}.py
@@ -87,7 +87,7 @@ rotated_detection-{backend}-{precision}_{static | dynamic}_{shape}.py
- **{static | dynamic}:** static shape or dynamic shape
- **{shape}:** input shape or shape range of a model
-Therefore, in the above example, you can also convert `rotated-faster-rcnn` to other backend models by changing the deployment config file `rotated-detection_onnxruntime_dynamic` to [others](https://github.com/open-mmlab/mmdeploy/tree/dev-1.x/configs/mmrotate), e.g., converting to tensorrt-fp16 model by `rotated-detection_tensorrt-fp16_dynamic-320x320-1024x1024.py`.
+Therefore, in the above example, you can also convert `rotated-faster-rcnn` to other backend models by changing the deployment config file `rotated-detection_onnxruntime_dynamic` to [others](https://github.com/open-mmlab/mmdeploy/tree/main/configs/mmrotate), e.g., converting to tensorrt-fp16 model by `rotated-detection_tensorrt-fp16_dynamic-320x320-1024x1024.py`.
```{tip}
When converting mmrotate models to tensorrt models, --device should be set to "cuda"
@@ -172,7 +172,7 @@ detector = RotatedDetector(model_path='./mmdeploy_models/mmrotate/ort', device_n
det = detector(img)
```
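One possible way to render the result of the `detector(img)` call above is sketched below; it assumes each row of `det` is `(cx, cy, w, h, angle, score)` with the angle in radians:

```python
# Sketch: draw the rotated boxes returned above.
# Assumption: det rows are (cx, cy, w, h, angle, score), angle in radians.
import cv2
import numpy as np

for cx, cy, w, h, angle, score in det:
    if score < 0.3:
        continue
    pts = cv2.boxPoints(((cx, cy), (w, h), np.degrees(angle)))
    cv2.polylines(img, [pts.astype(np.int32)], isClosed=True, color=(0, 255, 0))
cv2.imwrite('output_rotated_detection.png', img)
```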
-Besides python API, mmdeploy SDK also provides other FFI (Foreign Function Interface), such as C, C++, C#, Java and so on. You can learn their usage from [demos](https://github.com/open-mmlab/mmdeploy/tree/dev-1.x/demo).
+Besides python API, mmdeploy SDK also provides other FFI (Foreign Function Interface), such as C, C++, C#, Java and so on. You can learn their usage from [demos](https://github.com/open-mmlab/mmdeploy/tree/main/demo).
## Supported models
@@ -182,4 +182,4 @@ Besides python API, mmdeploy SDK also provides other FFI (Foreign Function Inter
| [Rotated FasterRCNN](https://github.com/open-mmlab/mmrotate/blob/1.x/configs/rotated_faster_rcnn) | Y | Y |
| [Oriented R-CNN](https://github.com/open-mmlab/mmrotate/blob/1.x/configs/oriented_rcnn) | Y | Y |
| [Gliding Vertex](https://github.com/open-mmlab/mmrotate/blob/1.x/configs/gliding_vertex) | Y | Y |
-| [RTMDET-R](https://github.com/open-mmlab/mmrotate/blob/dev-1.x/configs/rotated_rtmdet) | Y | Y |
+| [RTMDET-R](https://github.com/open-mmlab/mmrotate/blob/1.x/configs/rotated_rtmdet) | Y | Y |
diff --git a/docs/en/04-supported-codebases/mmseg.md b/docs/en/04-supported-codebases/mmseg.md
index 003cde7067..3faeb5061e 100644
--- a/docs/en/04-supported-codebases/mmseg.md
+++ b/docs/en/04-supported-codebases/mmseg.md
@@ -20,7 +20,7 @@ ______________________________________________________________________
### Install mmseg
-Please follow the [installation guide](https://mmsegmentation.readthedocs.io/en/1.x/get_started.html) to install mmseg.
+Please follow the [installation guide](https://mmsegmentation.readthedocs.io/en/latest/get_started.html) to install mmseg.
### Install mmdeploy
@@ -28,7 +28,7 @@ There are several methods to install mmdeploy, among which you can choose an app
**Method I:** Install precompiled package
-> **TODO**. MMDeploy hasn't released based on 1.x branch.
+You can refer to [get_started](https://mmdeploy.readthedocs.io/en/latest/get_started.html#installation) for instructions on installing the precompiled package.
**Method II:** Build using scripts
@@ -36,7 +36,7 @@ If your target platform is **Ubuntu 18.04 or later version**, we encourage you t
[scripts](../01-how-to-build/build_from_script.md). For example, the following commands install mmdeploy as well as inference engine - `ONNX Runtime`.
```shell
-git clone --recursive -b 1.x https://github.com/open-mmlab/mmdeploy.git
+git clone --recursive -b main https://github.com/open-mmlab/mmdeploy.git
cd mmdeploy
python3 tools/scripts/build_ubuntu_x64_ort.py $(nproc)
export PYTHONPATH=$(pwd)/build/lib:$PYTHONPATH
@@ -54,7 +54,7 @@ If neither **I** nor **II** meets your requirements, [building mmdeploy from sou
## Convert model
-You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/tree/1.x/tools/deploy.py) to convert mmseg models to the specified backend models. Its detailed usage can be learned from [here](https://github.com/open-mmlab/mmdeploy/tree/1.x/docs/en/02-how-to-run/convert_model.md#usage).
+You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/tree/main/tools/deploy.py) to convert mmseg models to the specified backend models. Its detailed usage can be learned from [here](https://github.com/open-mmlab/mmdeploy/tree/main/docs/en/02-how-to-run/convert_model.md#usage).
The command below shows an example of converting the `unet` model to an ONNX model that can be inferred by ONNX Runtime.
@@ -76,7 +76,7 @@ python tools/deploy.py \
--dump-info
```
-It is crucial to specify the correct deployment config during model conversion. We've already provided builtin deployment config [files](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmseg) of all supported backends for mmsegmentation. The config filename pattern is:
+It is crucial to specify the correct deployment config during model conversion. We've already provided builtin deployment config [files](https://github.com/open-mmlab/mmdeploy/tree/main/configs/mmseg) of all supported backends for mmsegmentation. The config filename pattern is:
```
segmentation_{backend}-{precision}_{static | dynamic}_{shape}.py
@@ -87,7 +87,7 @@ segmentation_{backend}-{precision}_{static | dynamic}_{shape}.py
- **{static | dynamic}:** static shape or dynamic shape
- **{shape}:** input shape or shape range of a model
-Therefore, in the above example, you can also convert `unet` to other backend models by changing the deployment config file `segmentation_onnxruntime_dynamic.py` to [others](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmseg), e.g., converting to tensorrt-fp16 model by `segmentation_tensorrt-fp16_dynamic-512x1024-2048x2048.py`.
+Therefore, in the above example, you can also convert `unet` to other backend models by changing the deployment config file `segmentation_onnxruntime_dynamic.py` to [others](https://github.com/open-mmlab/mmdeploy/tree/main/configs/mmseg), e.g., converting to tensorrt-fp16 model by `segmentation_tensorrt-fp16_dynamic-512x1024-2048x2048.py`.
```{tip}
When converting mmseg models to tensorrt models, --device should be set to "cuda"
@@ -184,7 +184,7 @@ img = img.astype(np.uint8)
cv2.imwrite('output_segmentation.png', img)
```
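A self-contained version of the segmentation snippet above might look like this sketch; the model directory and the random palette are assumptions:

```python
# Self-contained sketch of the SDK segmentation example.
# `model_path` is assumed to be the work_dir produced by tools/deploy.py.
import cv2
import numpy as np
from mmdeploy_runtime import Segmentor

img = cv2.imread('demo/demo.png')
segmentor = Segmentor(model_path='./mmdeploy_models/mmseg/ort',
                      device_name='cpu', device_id=0)
seg = segmentor(img)  # per-pixel class ids, shape (H, W)

# color each class with a random palette and blend with the input
palette = np.random.randint(0, 256, size=(seg.max() + 1, 3), dtype=np.uint8)
img = (img * 0.5 + palette[seg] * 0.5).astype(np.uint8)
cv2.imwrite('output_segmentation.png', img)
```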
-Besides python API, mmdeploy SDK also provides other FFI (Foreign Function Interface), such as C, C++, C#, Java and so on. You can learn their usage from [demos](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo).
+Besides python API, mmdeploy SDK also provides other FFI (Foreign Function Interface), such as C, C++, C#, Java and so on. You can learn their usage from [demos](https://github.com/open-mmlab/mmdeploy/tree/main/demo).
## Supported models
diff --git a/docs/en/05-supported-backends/rknn.md b/docs/en/05-supported-backends/rknn.md
index 28cb10e948..deacbf607a 100644
--- a/docs/en/05-supported-backends/rknn.md
+++ b/docs/en/05-supported-backends/rknn.md
@@ -2,7 +2,7 @@
Currently, MMDeploy only tests rk3588 and rv1126 on the Linux platform.
-The following features cannot be automatically enabled by mmdeploy and you need to manually modify the configuration in MMDeploy like [here](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/_base_/backends/rknn.py).
+The following features cannot be automatically enabled by mmdeploy and you need to manually modify the configuration in MMDeploy like [here](https://github.com/open-mmlab/mmdeploy/tree/main/configs/_base_/backends/rknn.py).
- target_platform other than default
- quantization settings
diff --git a/docs/en/06-custom-ops/tensorrt.md b/docs/en/06-custom-ops/tensorrt.md
index f00f8c7f4c..b2a2af5b12 100644
--- a/docs/en/06-custom-ops/tensorrt.md
+++ b/docs/en/06-custom-ops/tensorrt.md
@@ -308,7 +308,7 @@ Perform RoIAlign on output feature, used in bbox_head of most two-stage detector
#### Description
-ScatterND takes three inputs `data` tensor of rank r >= 1, `indices` tensor of rank q >= 1, and `updates` tensor of rank q + r - indices.shape\[-1\] - 1. The output of the operation is produced by creating a copy of the input `data`, and then updating its value to values specified by updates at specific index positions specified by `indices`. Its output shape is the same as the shape of `data`. Note that `indices` should not have duplicate entries. That is, two or more updates for the same index-location is not supported.
+ScatterND takes three inputs: a `data` tensor of rank r >= 1, an `indices` tensor of rank q >= 1, and an `updates` tensor of rank q + r - indices.shape[-1] - 1. The output of the operation is produced by creating a copy of the input `data`, and then updating its value to values specified by `updates` at specific index positions specified by `indices`. Its output shape is the same as the shape of `data`. Note that `indices` should not have duplicate entries. That is, two or more updates for the same index-location are not supported.
The `output` is calculated via the following equation:
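For illustration, the same semantics can be sketched in NumPy (a sketch of the operator's behavior, not the TensorRT plugin implementation):

```python
# NumPy sketch of ScatterND semantics (illustration only).
import numpy as np

def scatter_nd(data, indices, updates):
    # output starts as a copy of `data`; each index tuple in `indices`
    # addresses the first indices.shape[-1] dimensions of `data` and is
    # overwritten with the matching slice of `updates`.
    output = data.copy()
    for idx in np.ndindex(*indices.shape[:-1]):
        output[tuple(indices[idx])] = updates[idx]
    return output

data = np.zeros((4, 4, 4), dtype=np.float32)
indices = np.array([[0], [2]])                  # q = 2, indices.shape[-1] = 1
updates = np.ones((2, 4, 4), dtype=np.float32)  # rank q + r - indices.shape[-1] - 1 = 3
print(scatter_nd(data, indices, updates)[0, 0, 0])  # 1.0
```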
diff --git a/docs/en/conf.py b/docs/en/conf.py
index c130810b74..6dcaeb0c18 100644
--- a/docs/en/conf.py
+++ b/docs/en/conf.py
@@ -105,7 +105,7 @@
# documentation.
#
html_theme_options = {
- 'logo_url': 'https://mmdeploy.readthedocs.io/en/1.x/',
+ 'logo_url': 'https://mmdeploy.readthedocs.io/en/latest/',
'menu': [{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmdeploy'
diff --git a/docs/en/faq.md b/docs/en/faq.md
index 1eea9b08e3..d8143fa569 100644
--- a/docs/en/faq.md
+++ b/docs/en/faq.md
@@ -6,7 +6,7 @@
Fp16 mode requires a device with full-rate fp16 support.
-- "error: parameter check failed at: engine.cpp::setBindingDimensions::1046, condition: profileMinDims.d\[i\] \<= dimensions.d\[i\]"
+- "error: parameter check failed at: engine.cpp::setBindingDimensions::1046, condition: profileMinDims.d[i] \<= dimensions.d[i]"
When building an `ICudaEngine` from an `INetworkDefinition` that has dynamically resizable inputs, users need to specify at least one optimization profile, which can be set in the deploy config:
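The built-in TensorRT deploy configs express the profile through `model_inputs`; the sketch below is illustrative (the tensor name `input` and the shapes must match your model):

```python
# Sketch of the relevant part of a TensorRT deploy config.
# The tensor name `input` and the shapes are illustrative.
backend_config = dict(
    type='tensorrt',
    model_inputs=[
        dict(
            input_shapes=dict(
                input=dict(
                    min_shape=[1, 3, 320, 320],
                    opt_shape=[1, 3, 800, 1344],
                    max_shape=[1, 3, 1344, 1344])))
    ])
```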
@@ -25,7 +25,7 @@
The input tensor shape should be limited between `min_shape` and `max_shape`.
-- "error: \[TensorRT\] INTERNAL ERROR: Assertion failed: cublasStatus == CUBLAS_STATUS_SUCCESS"
+- "error: [TensorRT] INTERNAL ERROR: Assertion failed: cublasStatus == CUBLAS_STATUS_SUCCESS"
TRT 7.2.1 switched to using cuBLASLt (previously it was cuBLAS). cuBLASLt is the default choice for SM version >= 7.0. You may need CUDA-10.2 Patch 1 (released Aug 26, 2020) to resolve some cuBLASLt issues. Another option is to use the new TacticSource API and disable cuBLASLt tactics if you don't want to upgrade.
diff --git a/docs/en/get_started.md b/docs/en/get_started.md
index 07b0006931..9fce872ea1 100644
--- a/docs/en/get_started.md
+++ b/docs/en/get_started.md
@@ -118,14 +118,14 @@ Take the latest precompiled package as example, you can install it as follows:
```shell
# 1. install MMDeploy model converter
-pip install mmdeploy==1.0.0rc3
+pip install mmdeploy==1.0.0
# 2. install MMDeploy sdk inference
# install one of them according to whether you need gpu inference
# 2.1 support onnxruntime
-pip install mmdeploy-runtime==1.0.0rc3
+pip install mmdeploy-runtime==1.0.0
# 2.2 support onnxruntime-gpu, tensorrt
-pip install mmdeploy-runtime-gpu==1.0.0rc3
+pip install mmdeploy-runtime-gpu==1.0.0
# 3. install inference engine
# 3.1 install TensorRT
@@ -170,7 +170,7 @@ Based on the above settings, we provide an example to convert the Faster R-CNN i
```shell
# clone mmdeploy to get the deployment config. `--recursive` is not necessary
-git clone -b dev-1.x https://github.com/open-mmlab/mmdeploy.git
+git clone -b main https://github.com/open-mmlab/mmdeploy.git
# clone mmdetection repo. We have to use the config file to build PyTorch nn module
git clone -b 3.x https://github.com/open-mmlab/mmdetection.git
@@ -230,9 +230,9 @@ result = inference_model(
You can directly run MMDeploy demo programs in the precompiled package to get inference results.
```shell
-wget https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0rc3/mmdeploy-1.0.0rc3-linux-x86_64-cuda11.3.tar.gz
-tar xf mmdeploy-1.0.0rc3-linux-x86_64-cuda11.3
-cd mmdeploy-1.0.0rc3-linux-x86_64-cuda11.3
+wget https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-linux-x86_64-cuda11.3.tar.gz
+tar xf mmdeploy-1.0.0-linux-x86_64-cuda11.3.tar.gz
+cd mmdeploy-1.0.0-linux-x86_64-cuda11.3
# run python demo
python example/python/object_detection.py cuda ../mmdeploy_model/faster-rcnn ../mmdetection/demo/demo.jpg
# run C/C++ demo
@@ -269,7 +269,7 @@ for index, bbox, label_id in zip(indices, bboxes, labels):
cv2.imwrite('output_detection.png', img)
```
-You can find more examples from [here](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo/python).
+You can find more examples from [here](https://github.com/open-mmlab/mmdeploy/tree/main/demo/python).
#### C++ API
@@ -321,9 +321,9 @@ find_package(MMDeploy REQUIRED)
target_link_libraries(${name} PRIVATE mmdeploy ${OpenCV_LIBS})
```
-For more SDK C++ API usages, please read these [samples](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo/csrc/cpp).
+For more SDK C++ API usages, please read these [samples](https://github.com/open-mmlab/mmdeploy/tree/main/demo/csrc/cpp).
-For the rest C, C# and Java API usages, please read [C demos](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo/csrc/c), [C# demos](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo/csharp) and [Java demos](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo/java) respectively.
+For the rest C, C# and Java API usages, please read [C demos](https://github.com/open-mmlab/mmdeploy/tree/main/demo/csrc/c), [C# demos](https://github.com/open-mmlab/mmdeploy/tree/main/demo/csharp) and [Java demos](https://github.com/open-mmlab/mmdeploy/tree/main/demo/java) respectively.
We'll talk about them more in our next release.
#### Accelerate preprocessing (Experimental)
diff --git a/docs/en/switch_language.md b/docs/en/switch_language.md
index 03bf778b72..1b5df42929 100644
--- a/docs/en/switch_language.md
+++ b/docs/en/switch_language.md
@@ -1,3 +1,3 @@
-## English
+## English
-## 简体中文
+## 简体中文
diff --git a/docs/zh_cn/01-how-to-build/build_from_docker.md b/docs/zh_cn/01-how-to-build/build_from_docker.md
index 7cc44072f7..f98a1a6550 100644
--- a/docs/zh_cn/01-how-to-build/build_from_docker.md
+++ b/docs/zh_cn/01-how-to-build/build_from_docker.md
@@ -51,7 +51,7 @@ docker run --gpus all -it mmdeploy:master-gpu
As described [here](https://forums.developer.nvidia.com/t/cuda-error-the-provided-ptx-was-compiled-with-an-unsupported-toolchain/185754), update the GPU driver to the latest version available for your GPU.
-2. docker: Error response from daemon: could not select device driver "" with capabilities: \[gpu\].
+2. docker: Error response from daemon: could not select device driver "" with capabilities: [gpu].
```
# Add the package repositories
diff --git a/docs/zh_cn/01-how-to-build/rockchip.md b/docs/zh_cn/01-how-to-build/rockchip.md
index 4a371c543a..109ac2d5d8 100644
--- a/docs/zh_cn/01-how-to-build/rockchip.md
+++ b/docs/zh_cn/01-how-to-build/rockchip.md
@@ -105,7 +105,7 @@ python tools/deploy.py \
- YOLOV3 & YOLOX
-Write the following model partition config into [detection_rknn_static.py](https://github.com/open-mmlab/mmdeploy/blob/1.x/configs/mmdet/detection/detection_rknn-int8_static-320x320.py)
+Write the following model partition config into [detection_rknn_static.py](https://github.com/open-mmlab/mmdeploy/blob/main/configs/mmdet/detection/detection_rknn-int8_static-320x320.py)
```python
# yolov3, yolox for rknn-toolkit and rknn-toolkit2
@@ -136,7 +136,7 @@ python tools/deploy.py \
- RTMDet
-Write the following model partition config into [detection_rknn-int8_static-640x640.py](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/configs/mmdet/detection/detection_rknn-int8_static-640x640.py)
+Write the following model partition config into [detection_rknn-int8_static-640x640.py](https://github.com/open-mmlab/mmdeploy/blob/main/configs/mmdet/detection/detection_rknn-int8_static-640x640.py)
```python
# rtmdet for rknn-toolkit and rknn-toolkit2
@@ -154,7 +154,7 @@ partition_config = dict(
- RetinaNet & SSD & FSAF with rknn-toolkit2
-Write the following model partition config into [detection_rknn_static.py](https://github.com/open-mmlab/mmdeploy/blob/1.x/configs/mmdet/detection/detection_rknn-int8_static-320x320.py). Users of rknn-toolkit do not need this.
+Write the following model partition config into [detection_rknn_static.py](https://github.com/open-mmlab/mmdeploy/blob/main/configs/mmdet/detection/detection_rknn-int8_static-320x320.py). Users of rknn-toolkit do not need this.
```python
# retinanet, ssd and fsaf for rknn-toolkit2
diff --git a/docs/zh_cn/02-how-to-run/prebuilt_package_windows.md b/docs/zh_cn/02-how-to-run/prebuilt_package_windows.md
index 134bc260b7..c98848b176 100644
--- a/docs/zh_cn/02-how-to-run/prebuilt_package_windows.md
+++ b/docs/zh_cn/02-how-to-run/prebuilt_package_windows.md
@@ -23,7 +23,7 @@ ______________________________________________________________________
Currently, `MMDeploy` provides prebuilt packages for two kinds of devices on the `Windows` platform, `cpu` and `cuda`. The `cpu` version supports inference with onnxruntime cpu, and the `cuda` version supports inference with onnxruntime-gpu and tensorrt. They can be obtained from [Releases](https://github.com/open-mmlab/mmdeploy/releases).
-This tutorial takes `mmdeploy-1.0.0rc3-windows-amd64.zip` and `mmdeploy-1.0.0rc3-windows-amd64-cuda11.3.zip` as examples to show how to use the prebuilt packages.
+This tutorial takes `mmdeploy-1.0.0-windows-amd64.zip` and `mmdeploy-1.0.0-windows-amd64-cuda11.3.zip` as examples to show how to use the prebuilt packages.
To help users get started quickly, this tutorial takes a classification model (mmclassification) as an example to show how to use both kinds of prebuilt packages.
@@ -89,8 +89,8 @@ ______________________________________________________________________
5. Install the prebuilt packages of `mmdeploy` (model conversion) and `mmdeploy_runtime` (model inference Python API)
```bash
- pip install mmdeploy==1.0.0rc3
- pip install mmdeploy-runtime==1.0.0rc3
+ pip install mmdeploy==1.0.0
+ pip install mmdeploy-runtime==1.0.0
```
:point_right: If you have installed them before, uninstall them first and then reinstall.
@@ -108,7 +108,7 @@ ______________________________________________________________________
![sys-path](https://user-images.githubusercontent.com/16019484/181463801-1d7814a8-b256-46e9-86f2-c08de0bc150b.png)
:exclamation: Restart PowerShell so the environment variables take effect. You can check whether they are set successfully with `echo $env:PATH`.
-8. Download the SDK C/cpp Library mmdeploy-1.0.0rc3-windows-amd64.zip
+8. Download the SDK C/cpp Library mmdeploy-1.0.0-windows-amd64.zip
### TensorRT
@@ -117,8 +117,8 @@ ______________________________________________________________________
5. Install the prebuilt packages of `mmdeploy` (model conversion) and `mmdeploy_runtime` (model inference Python API)
```bash
- pip install mmdeploy==1.0.0rc3
- pip install mmdeploy-runtime-gpu==1.0.0rc3
+ pip install mmdeploy==1.0.0
+ pip install mmdeploy-runtime-gpu==1.0.0
```
:point_right: If you have installed them before, uninstall them first and then reinstall.
@@ -137,7 +137,7 @@ ______________________________________________________________________
7. Install pycuda: `pip install pycuda`
-8. Download the SDK C/cpp Library mmdeploy-1.0.0rc3-windows-amd64-cuda11.3.zip
+8. Download the SDK C/cpp Library mmdeploy-1.0.0-windows-amd64-cuda11.3.zip
## Model Convert
@@ -149,7 +149,7 @@ ______________________________________________________________________
```
..
-|-- mmdeploy-1.0.0rc3-windows-amd64
+|-- mmdeploy-1.0.0-windows-amd64
|-- mmclassification
|-- mmdeploy
`-- resnet18_8xb32_in1k_20210831-fbbb1da6.pth
@@ -197,7 +197,7 @@ export2SDK(deploy_cfg, model_cfg, work_dir, pth=model_checkpoint, device=device)
```
..
-|-- mmdeploy-1.0.0rc3-windows-amd64-cuda11.3
+|-- mmdeploy-1.0.0-windows-amd64-cuda11.3
|-- mmclassification
|-- mmdeploy
`-- resnet18_8xb32_in1k_20210831-fbbb1da6.pth
@@ -260,8 +260,8 @@ export2SDK(deploy_cfg, model_cfg, work_dir, pth=model_checkpoint, device=device)
```
.
-|-- mmdeploy-1.0.0rc3-windows-amd64-cuda11.1-tensorrt8.2.3.0
-|-- mmdeploy-1.0.0rc3-windows-amd64-onnxruntime1.8.1
+|-- mmdeploy-1.0.0-windows-amd64
+|-- mmdeploy-1.0.0-windows-amd64-cuda11.3
|-- mmclassification
|-- mmdeploy
|-- resnet18_8xb32_in1k_20210831-fbbb1da6.pth
@@ -340,7 +340,7 @@ python .\mmdeploy\demo\python\image_classification.py cpu .\work_dir\onnx\resnet
It is recommended to use cmd here, so that a popup will appear if the exe cannot find a required dll at runtime.
- Under the mmdeploy-1.0.0rc3-windows-amd64-onnxruntime1.8.1\\example\\cpp\\build\\Release directory:
+ Under the mmdeploy-1.0.0-windows-amd64\\example\\cpp\\build\\Release directory:
```
.\image_classification.exe cpu C:\workspace\work_dir\onnx\resnet\ C:\workspace\mmclassification\demo\demo.JPEG
@@ -360,7 +360,7 @@ python .\mmdeploy\demo\python\image_classification.py cpu .\work_dir\onnx\resnet
It is recommended to use cmd here, so that a popup will appear if the exe cannot find a required dll at runtime.
- Under the mmdeploy-1.0.0rc3-windows-amd64-cuda11.1-tensorrt8.2.3.0\\example\\cpp\\build\\Release directory:
+ Under the mmdeploy-1.0.0-windows-amd64-cuda11.3\\example\\cpp\\build\\Release directory:
```
.\image_classification.exe cuda C:\workspace\work_dir\trt\resnet C:\workspace\mmclassification\demo\demo.JPEG
diff --git a/docs/zh_cn/02-how-to-run/useful_tools.md b/docs/zh_cn/02-how-to-run/useful_tools.md
index bbd823c697..2f08cefa9f 100644
--- a/docs/zh_cn/02-how-to-run/useful_tools.md
+++ b/docs/zh_cn/02-how-to-run/useful_tools.md
@@ -83,7 +83,7 @@ python tools/onnx2pplnn.py \
- `onnx_path`: The path of the `ONNX` model to convert.
- `output_path`: The converted `PPLNN` algorithm path in json format.
- `device`: The device of the model during conversion.
-- `opt-shapes`: Optimal shapes for PPLNN optimization. The shape of each tensor should be wrap with "\[\]" or "()" and the shapes of tensors should be separated by ",".
+- `opt-shapes`: Optimal shapes for PPLNN optimization. The shape of each tensor should be wrapped with "[]" or "()" and the shapes of tensors should be separated by ",".
- `--log-level`: To set the log level, chosen from `'CRITICAL', 'FATAL', 'ERROR', 'WARN', 'WARNING', 'INFO', 'DEBUG', 'NOTSET'`. If not specified, it will be set to `INFO`.
## onnx2tensorrt
diff --git a/docs/zh_cn/03-benchmark/benchmark.md b/docs/zh_cn/03-benchmark/benchmark.md
index 1e1a0c5d10..1a473cff9f 100644
--- a/docs/zh_cn/03-benchmark/benchmark.md
+++ b/docs/zh_cn/03-benchmark/benchmark.md
@@ -2004,7 +2004,7 @@ GPU: ncnn, TensorRT, PPLNN
fp32 |
- TSN |
+ TSN |
Recognition |
Kinetics-400 |
top-1 |
@@ -2025,7 +2025,7 @@ GPU: ncnn, TensorRT, PPLNN
- |
- SlowFast |
+ SlowFast |
Recognition |
Kinetics-400 |
top-1 |
diff --git a/docs/zh_cn/04-supported-codebases/mmaction2.md b/docs/zh_cn/04-supported-codebases/mmaction2.md
index af299a0b7e..f1a42307e7 100644
--- a/docs/zh_cn/04-supported-codebases/mmaction2.md
+++ b/docs/zh_cn/04-supported-codebases/mmaction2.md
@@ -21,7 +21,7 @@ ______________________________________________________________________
### Install mmaction2
-Please follow the [installation guide](https://github.com/open-mmlab/mmaction2/tree/dev-1.x#installation).
+Please follow the [installation guide](https://github.com/open-mmlab/mmaction2/tree/1.x#installation).
### Install mmdeploy
@@ -29,7 +29,7 @@ mmdeploy 有以下几种安装方式:
**Method I:** Install precompiled package
-Get the latest prebuilt package from this [link](https://github.com/open-mmlab/mmdeploy/releases)
+Please refer to the [installation overview](https://mmdeploy.readthedocs.io/zh_CN/latest/get_started.html#mmdeploy)
**Method II:** Build using scripts
@@ -37,7 +37,7 @@ mmdeploy 有以下几种安装方式:
For example, the following commands install mmdeploy as well as inference engine - `ONNX Runtime`.
```shell
-git clone --recursive -b 1.x https://github.com/open-mmlab/mmdeploy.git
+git clone --recursive -b main https://github.com/open-mmlab/mmdeploy.git
cd mmdeploy
python3 tools/scripts/build_ubuntu_x64_ort.py $(nproc)
export PYTHONPATH=$(pwd)/build/lib:$PYTHONPATH
@@ -50,10 +50,10 @@ export LD_LIBRARY_PATH=$(pwd)/../mmdeploy-dep/onnxruntime-linux-x64-1.8.1/lib/:$
## Convert model
-You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/tree/1.x/tools/deploy.py) to convert mmaction2 models to backend models in one step.
-Its detailed usage can be learned from [here](https://github.com/open-mmlab/mmdeploy/tree/1.x/docs/en/02-how-to-run/convert_model.md#usage).
+You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/tree/main/tools/deploy.py) to convert mmaction2 models to backend models in one step.
+Its detailed usage can be learned from [here](https://github.com/open-mmlab/mmdeploy/tree/main/docs/en/02-how-to-run/convert_model.md#usage).
-One key to the conversion is using the correct config file. Deployment [config files](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmaction) for all backends are built into the project.
+One key to the conversion is using the correct config file. Deployment [config files](https://github.com/open-mmlab/mmdeploy/tree/main/configs/mmaction) for all backends are built into the project.
The filename pattern is:
```
@@ -181,13 +181,13 @@ for label_id, score in result:
```
Besides the Python API, mmdeploy SDK also provides multi-language interfaces such as C, C++, C# and Java.
-You can refer to the [samples](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo) to learn how to use the other language interfaces.
+You can refer to the [samples](https://github.com/open-mmlab/mmdeploy/tree/main/demo) to learn how to use the other language interfaces.
> The C# and Java interfaces of mmaction2 are yet to be developed
## Supported models
-| Model | TorchScript | ONNX Runtime | TensorRT | ncnn | PPLNN | OpenVINO |
-| :-------------------------------------------------------------------------------------------- | :---------: | :----------: | :------: | :--: | :---: | :------: |
-| [TSN](https://github.com/open-mmlab/mmaction2/tree/dev-1.x/configs/recognition/tsn) | N | Y | Y | N | N | N |
-| [SlowFast](https://github.com/open-mmlab/mmaction2/tree/dev-1.x/configs/recognition/slowfast) | N | Y | Y | N | N | N |
+| Model | TorchScript | ONNX Runtime | TensorRT | ncnn | PPLNN | OpenVINO |
+| :---------------------------------------------------------------------------------------- | :---------: | :----------: | :------: | :--: | :---: | :------: |
+| [TSN](https://github.com/open-mmlab/mmaction2/tree/1.x/configs/recognition/tsn) | N | Y | Y | N | N | N |
+| [SlowFast](https://github.com/open-mmlab/mmaction2/tree/1.x/configs/recognition/slowfast) | N | Y | Y | N | N | N |
diff --git a/docs/zh_cn/04-supported-codebases/mmcls.md b/docs/zh_cn/04-supported-codebases/mmcls.md
index 5ed7bfe6c8..ef906a178e 100644
--- a/docs/zh_cn/04-supported-codebases/mmcls.md
+++ b/docs/zh_cn/04-supported-codebases/mmcls.md
@@ -27,7 +27,7 @@ mmdeploy 有以下几种安装方式:
**Method I:** Install precompiled package
-> To be added after mmdeploy officially releases 1.x
+Please refer to the [installation overview](https://mmdeploy.readthedocs.io/zh_CN/latest/get_started.html#mmdeploy)
**Method II:** Build using scripts
@@ -35,7 +35,7 @@ mmdeploy 有以下几种安装方式:
For example, the following commands install mmdeploy as well as inference engine - `ONNX Runtime`.
```shell
-git clone --recursive -b 1.x https://github.com/open-mmlab/mmdeploy.git
+git clone --recursive -b main https://github.com/open-mmlab/mmdeploy.git
cd mmdeploy
python3 tools/scripts/build_ubuntu_x64_ort.py $(nproc)
export PYTHONPATH=$(pwd)/build/lib:$PYTHONPATH
@@ -48,8 +48,8 @@ export LD_LIBRARY_PATH=$(pwd)/../mmdeploy-dep/onnxruntime-linux-x64-1.8.1/lib/:$
## Convert model
-You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/tree/1.x/tools/deploy.py) to convert mmcls models to backend models in one step.
-Its detailed usage can be learned from [here](https://github.com/open-mmlab/mmdeploy/tree/1.x/docs/zh_cn/02-how-to-run/convert_model.md#使用方法).
+You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/tree/main/tools/deploy.py) to convert mmcls models to backend models in one step.
+Its detailed usage can be learned from [here](https://github.com/open-mmlab/mmdeploy/tree/main/docs/zh_cn/02-how-to-run/convert_model.md#使用方法).
In the following, we will demonstrate how to convert `resnet18` to an onnx model.
@@ -71,7 +71,7 @@ python tools/deploy.py \
--dump-info
```
-One key to the conversion is using the correct config file. Deployment [config files](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmcls) for all backends are built into the project.
+One key to the conversion is using the correct config file. Deployment [config files](https://github.com/open-mmlab/mmdeploy/tree/main/configs/mmcls) for all backends are built into the project.
The filename pattern is:
```
@@ -173,7 +173,7 @@ for label_id, score in result:
```
Besides the Python API, mmdeploy SDK also provides multi-language interfaces such as C, C++, C# and Java.
-You can refer to the [samples](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo) to learn how to use the other language interfaces.
+You can refer to the [samples](https://github.com/open-mmlab/mmdeploy/tree/main/demo) to learn how to use the other language interfaces.
## Supported models
diff --git a/docs/zh_cn/04-supported-codebases/mmdet.md b/docs/zh_cn/04-supported-codebases/mmdet.md
index a1af645a98..c7b412c28d 100644
--- a/docs/zh_cn/04-supported-codebases/mmdet.md
+++ b/docs/zh_cn/04-supported-codebases/mmdet.md
@@ -27,7 +27,7 @@ mmdeploy 有以下几种安装方式:
**Method I:** Install precompiled package
-> To be added after mmdeploy officially releases 1.x
+Please refer to the [installation overview](https://mmdeploy.readthedocs.io/zh_CN/latest/get_started.html#mmdeploy)
**Method II:** Build using scripts
@@ -35,7 +35,7 @@ mmdeploy 有以下几种安装方式:
For example, the following commands install mmdeploy as well as inference engine - `ONNX Runtime`.
```shell
-git clone --recursive -b 1.x https://github.com/open-mmlab/mmdeploy.git
+git clone --recursive -b main https://github.com/open-mmlab/mmdeploy.git
cd mmdeploy
python3 tools/scripts/build_ubuntu_x64_ort.py $(nproc)
export PYTHONPATH=$(pwd)/build/lib:$PYTHONPATH
@@ -48,8 +48,8 @@ export LD_LIBRARY_PATH=$(pwd)/../mmdeploy-dep/onnxruntime-linux-x64-1.8.1/lib/:$
## Convert model
-You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/blob/1.x/tools/deploy.py) to convert mmdet models to backend models in one step.
-Its detailed usage can be learned from [here](https://github.com/open-mmlab/mmdeploy/tree/1.x/docs/en/02-how-to-run/convert_model.md#usage).
+You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/blob/main/tools/deploy.py) to convert mmdet models to backend models in one step.
+Its detailed usage can be learned from [here](https://github.com/open-mmlab/mmdeploy/tree/main/docs/en/02-how-to-run/convert_model.md#usage).
In the following, we will demonstrate how to convert `Faster R-CNN` to an onnx model.
@@ -69,7 +69,7 @@ python tools/deploy.py \
--dump-info
```
-One key to the conversion is using the correct config file. Deployment [config files](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmdet) for all backends are built into the project.
+One key to the conversion is using the correct config file. Deployment [config files](https://github.com/open-mmlab/mmdeploy/tree/main/configs/mmdet) for all backends are built into the project.
The filename pattern is:
```
@@ -188,7 +188,7 @@ cv2.imwrite('output_detection.png', img)
```
Besides the Python API, mmdeploy SDK also provides multi-language interfaces such as C, C++, C# and Java.
-You can refer to the [samples](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo) to learn how to use the other language interfaces.
+You can refer to the [samples](https://github.com/open-mmlab/mmdeploy/tree/main/demo) to learn how to use the other language interfaces.
## Supported models
diff --git a/docs/zh_cn/04-supported-codebases/mmedit.md b/docs/zh_cn/04-supported-codebases/mmedit.md
index 382eb86f6d..58f4fb206f 100644
--- a/docs/zh_cn/04-supported-codebases/mmedit.md
+++ b/docs/zh_cn/04-supported-codebases/mmedit.md
@@ -28,7 +28,7 @@ mmdeploy 有以下几种安装方式:
**Method I:** Install precompiled package
-> To be added after mmdeploy officially releases 1.x
+Please refer to the [installation overview](https://mmdeploy.readthedocs.io/zh_CN/latest/get_started.html#mmdeploy)
**Method II:** Build using scripts
@@ -36,7 +36,7 @@ mmdeploy 有以下几种安装方式:
For example, the following commands install mmdeploy as well as inference engine - `ONNX Runtime`.
```shell
-git clone --recursive -b 1.x https://github.com/open-mmlab/mmdeploy.git
+git clone --recursive -b main https://github.com/open-mmlab/mmdeploy.git
cd mmdeploy
python3 tools/scripts/build_ubuntu_x64_ort.py $(nproc)
export PYTHONPATH=$(pwd)/build/lib:$PYTHONPATH
@@ -49,10 +49,10 @@ export LD_LIBRARY_PATH=$(pwd)/../mmdeploy-dep/onnxruntime-linux-x64-1.8.1/lib/:$
## Convert model
-You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/tree/1.x/tools/deploy.py) to convert mmedit models to backend models in one step.
-Its detailed usage can be learned from [here](https://github.com/open-mmlab/mmdeploy/tree/1.x/docs/zh_cn/02-how-to-run/convert_model.md#使用方法).
+You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/tree/main/tools/deploy.py) to convert mmedit models to backend models in one step.
+Its detailed usage can be learned from [here](https://github.com/open-mmlab/mmdeploy/tree/main/docs/zh_cn/02-how-to-run/convert_model.md#使用方法).
-One key to the conversion is using the correct config file. Deployment [config files](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmedit) for all backends are built into the project.
+One key to the conversion is using the correct config file. Deployment [config files](https://github.com/open-mmlab/mmdeploy/tree/main/configs/mmedit) for all backends are built into the project.
The filename pattern is:
```
@@ -185,7 +185,7 @@ cv2.imwrite('output_restorer.bmp', result)
```
Besides the Python API, the mmdeploy SDK also provides interfaces in other languages such as C, C++, C# and Java.
-You can refer to the [demos](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo) to learn how to use the APIs of the other languages.
+You can refer to the [demos](https://github.com/open-mmlab/mmdeploy/tree/main/demo) to learn how to use the APIs of the other languages.
## Supported models
diff --git a/docs/zh_cn/04-supported-codebases/mmocr.md b/docs/zh_cn/04-supported-codebases/mmocr.md
index 1e7948b321..dc6762131c 100644
--- a/docs/zh_cn/04-supported-codebases/mmocr.md
+++ b/docs/zh_cn/04-supported-codebases/mmocr.md
@@ -24,7 +24,7 @@ ______________________________________________________________________
### Install mmocr
-Please refer to the [official installation guide](https://mmocr.readthedocs.io/en/dev-1.x/get_started/install.html).
+Please refer to the [official installation guide](https://mmocr.readthedocs.io/en/latest/install.html).
### Install mmdeploy
@@ -32,7 +32,7 @@ mmdeploy can be installed in the following ways:
**Method 1:** Install the prebuilt package
-> To be added after the official 1.x release of mmdeploy
+Please refer to the [installation overview](https://mmdeploy.readthedocs.io/zh_CN/latest/get_started.html#mmdeploy)
**Method 2:** Install with the one-click script
@@ -40,7 +40,7 @@ mmdeploy can be installed in the following ways:
For example, the following commands install mmdeploy together with its companion inference engine, `ONNX Runtime`.
```shell
-git clone --recursive -b 1.x https://github.com/open-mmlab/mmdeploy.git
+git clone --recursive -b main https://github.com/open-mmlab/mmdeploy.git
cd mmdeploy
python3 tools/scripts/build_ubuntu_x64_ort.py $(nproc)
export PYTHONPATH=$(pwd)/build/lib:$PYTHONPATH
@@ -53,10 +53,10 @@ export LD_LIBRARY_PATH=$(pwd)/../mmdeploy-dep/onnxruntime-linux-x64-1.8.1/lib/:$
## Model conversion
-You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/tree/1.x/tools/deploy.py) to convert mmocr models to backend models in one step.
-For detailed usage of the tool, please refer to [this document](https://github.com/open-mmlab/mmdeploy/tree/1.x/docs/en/02-how-to-run/convert_model.md#usage).
+You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/tree/main/tools/deploy.py) to convert mmocr models to backend models in one step.
+For detailed usage of the tool, please refer to [this document](https://github.com/open-mmlab/mmdeploy/tree/main/docs/en/02-how-to-run/convert_model.md#usage).
-One of the keys to the conversion is using the correct config files. The project ships with built-in deployment [config files](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmocr) for each backend.
+One of the keys to the conversion is using the correct config files. The project ships with built-in deployment [config files](https://github.com/open-mmlab/mmdeploy/tree/main/configs/mmocr) for each backend.
The naming convention of these files is:
```
@@ -234,7 +234,7 @@ print(texts)
```
Besides the Python API, the mmdeploy SDK also provides interfaces in other languages such as C, C++, C# and Java.
-You can refer to the [demos](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo) to learn how to use the APIs of the other languages.
+You can refer to the [demos](https://github.com/open-mmlab/mmdeploy/tree/main/demo) to learn how to use the APIs of the other languages.
## Supported models
diff --git a/docs/zh_cn/04-supported-codebases/mmpose.md b/docs/zh_cn/04-supported-codebases/mmpose.md
index ce5ea46574..cdddec6bac 100644
--- a/docs/zh_cn/04-supported-codebases/mmpose.md
+++ b/docs/zh_cn/04-supported-codebases/mmpose.md
@@ -27,7 +27,7 @@ mmdeploy can be installed in the following ways:
**Method 1:** Install the prebuilt package
-> To be added after the official 1.x release of mmdeploy
+Please refer to the [installation overview](https://mmdeploy.readthedocs.io/zh_CN/latest/get_started.html#mmdeploy)
**Method 2:** Install with the one-click script
@@ -35,7 +35,7 @@ mmdeploy can be installed in the following ways:
For example, the following commands install mmdeploy together with its companion inference engine, `ONNX Runtime`.
```shell
-git clone --recursive -b 1.x https://github.com/open-mmlab/mmdeploy.git
+git clone --recursive -b main https://github.com/open-mmlab/mmdeploy.git
cd mmdeploy
python3 tools/scripts/build_ubuntu_x64_ort.py $(nproc)
export PYTHONPATH=$(pwd)/build/lib:$PYTHONPATH
@@ -48,8 +48,8 @@ export LD_LIBRARY_PATH=$(pwd)/../mmdeploy-dep/onnxruntime-linux-x64-1.8.1/lib/:$
## Model conversion
-You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/tree/1.x/tools/deploy.py) to convert mmpose models to backend models in one step.
-For detailed usage of the tool, please refer to [this document](https://github.com/open-mmlab/mmdeploy/tree/1.x/docs/en/02-how-to-run/convert_model.md#usage).
+You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/tree/main/tools/deploy.py) to convert mmpose models to backend models in one step.
+For detailed usage of the tool, please refer to [this document](https://github.com/open-mmlab/mmdeploy/tree/main/docs/en/02-how-to-run/convert_model.md#usage).
In the following, we demonstrate how to convert `hrnet` into an onnx model.
@@ -68,7 +68,7 @@ python tools/deploy.py \
--show
```
-One of the keys to the conversion is using the correct config files. The project ships with built-in deployment [config files](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmpose) for each backend.
+One of the keys to the conversion is using the correct config files. The project ships with built-in deployment [config files](https://github.com/open-mmlab/mmdeploy/tree/main/configs/mmpose) for each backend.
The naming convention of these files is:
```
diff --git a/docs/zh_cn/04-supported-codebases/mmrotate.md b/docs/zh_cn/04-supported-codebases/mmrotate.md
index 8191ac4333..1100fed710 100644
--- a/docs/zh_cn/04-supported-codebases/mmrotate.md
+++ b/docs/zh_cn/04-supported-codebases/mmrotate.md
@@ -27,7 +27,7 @@ mmdeploy can be installed in the following ways:
**Method 1:** Install the prebuilt package
-> To be added after the official dev-1.x release of mmdeploy
+Please refer to the [installation overview](https://mmdeploy.readthedocs.io/zh_CN/latest/get_started.html#mmdeploy)
**Method 2:** Install with the one-click script
@@ -35,7 +35,7 @@ mmdeploy can be installed in the following ways:
For example, the following commands install mmdeploy together with its companion inference engine, `ONNX Runtime`.
```shell
-git clone --recursive -b dev-1.x https://github.com/open-mmlab/mmdeploy.git
+git clone --recursive -b main https://github.com/open-mmlab/mmdeploy.git
cd mmdeploy
python3 tools/scripts/build_ubuntu_x64_ort.py $(nproc)
export PYTHONPATH=$(pwd)/build/lib:$PYTHONPATH
@@ -52,7 +52,7 @@ export LD_LIBRARY_PATH=$(pwd)/../mmdeploy-dep/onnxruntime-linux-x64-1.8.1/lib/:$
## Model conversion
-You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/tools/deploy.py) to convert mmrotate models to backend models in one step.
+You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/blob/main/tools/deploy.py) to convert mmrotate models to backend models in one step.
For detailed usage of the tool, please refer to [this document](https://github.com/open-mmlab/mmdeploy/blob/master/docs/en/02-how-to-run/convert_model.md#usage).
In the following, we demonstrate how to convert `rotated-faster-rcnn` into an onnx model.
@@ -76,7 +76,7 @@ python tools/deploy.py \
--dump-info
```
-One of the keys to the conversion is using the correct config files. The project ships with built-in deployment [config files](https://github.com/open-mmlab/mmdeploy/tree/dev-1.x/configs/mmrotate) for each backend.
+One of the keys to the conversion is using the correct config files. The project ships with built-in deployment [config files](https://github.com/open-mmlab/mmdeploy/tree/main/configs/mmrotate) for each backend.
The naming convention of these files is:
```
@@ -176,7 +176,7 @@ det = detector(img)
```
Besides the Python API, the mmdeploy SDK also provides interfaces in other languages such as C, C++, C# and Java.
-You can refer to the [demos](https://github.com/open-mmlab/mmdeploy/tree/dev-1.x/demo) to learn how to use the APIs of the other languages.
+You can refer to the [demos](https://github.com/open-mmlab/mmdeploy/tree/main/demo) to learn how to use the APIs of the other languages.
## Supported models
@@ -186,4 +186,4 @@ det = detector(img)
| [Rotated FasterRCNN](https://github.com/open-mmlab/mmrotate/blob/1.x/configs/rotated_faster_rcnn) | Y | Y |
| [Oriented R-CNN](https://github.com/open-mmlab/mmrotate/blob/1.x/configs/oriented_rcnn) | Y | Y |
| [Gliding Vertex](https://github.com/open-mmlab/mmrotate/blob/1.x/configs/gliding_vertex) | Y | Y |
-| [RTMDET-R](https://github.com/open-mmlab/mmrotate/blob/dev-1.x/configs/rotated_rtmdet) | Y | Y |
+| [RTMDET-R](https://github.com/open-mmlab/mmrotate/blob/1.x/configs/rotated_rtmdet) | Y | Y |
diff --git a/docs/zh_cn/04-supported-codebases/mmseg.md b/docs/zh_cn/04-supported-codebases/mmseg.md
index 2292259300..290e5c7723 100644
--- a/docs/zh_cn/04-supported-codebases/mmseg.md
+++ b/docs/zh_cn/04-supported-codebases/mmseg.md
@@ -20,7 +20,7 @@ ______________________________________________________________________
### Install mmseg
-Please refer to the [official installation guide](https://mmsegmentation.readthedocs.io/en/1.x/get_started.html).
+Please refer to the [official installation guide](https://mmsegmentation.readthedocs.io/en/latest/get_started.html).
### Install mmdeploy
@@ -28,7 +28,7 @@ mmdeploy can be installed in the following ways:
**Method 1:** Install the prebuilt package
-> To be added after the official 1.x release of mmdeploy
+Please refer to the [installation overview](https://mmdeploy.readthedocs.io/zh_CN/latest/get_started.html#mmdeploy)
**Method 2:** Install with the one-click script
@@ -36,7 +36,7 @@ mmdeploy can be installed in the following ways:
For example, the following commands install mmdeploy together with its companion inference engine, `ONNX Runtime`.
```shell
-git clone --recursive -b 1.x https://github.com/open-mmlab/mmdeploy.git
+git clone --recursive -b main https://github.com/open-mmlab/mmdeploy.git
cd mmdeploy
python3 tools/scripts/build_ubuntu_x64_ort.py $(nproc)
export PYTHONPATH=$(pwd)/build/lib:$PYTHONPATH
@@ -53,8 +53,8 @@ export LD_LIBRARY_PATH=$(pwd)/../mmdeploy-dep/onnxruntime-linux-x64-1.8.1/lib/:$
## Model conversion
-You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/tree/1.x/tools/deploy.py) to convert mmseg models to backend models in one step.
-For detailed usage of the tool, please refer to [this document](https://github.com/open-mmlab/mmdeploy/tree/1.x/docs/en/02-how-to-run/convert_model.md#usage).
+You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/tree/main/tools/deploy.py) to convert mmseg models to backend models in one step.
+For detailed usage of the tool, please refer to [this document](https://github.com/open-mmlab/mmdeploy/tree/main/docs/en/02-how-to-run/convert_model.md#usage).
In the following, we demonstrate how to convert `unet` into an onnx model.
@@ -76,7 +76,7 @@ python tools/deploy.py \
--dump-info
```
-One of the keys to the conversion is using the correct config files. The project ships with built-in deployment [config files](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmseg) for each backend.
+One of the keys to the conversion is using the correct config files. The project ships with built-in deployment [config files](https://github.com/open-mmlab/mmdeploy/tree/main/configs/mmseg) for each backend.
The naming convention of these files is:
```
@@ -188,7 +188,7 @@ cv2.imwrite('output_segmentation.png', img)
```
Besides the Python API, the mmdeploy SDK also provides interfaces in other languages such as C, C++, C# and Java.
-You can refer to the [demos](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo) to learn how to use the APIs of the other languages.
+You can refer to the [demos](https://github.com/open-mmlab/mmdeploy/tree/main/demo) to learn how to use the APIs of the other languages.
## Supported models
diff --git a/docs/zh_cn/05-supported-backends/rknn.md b/docs/zh_cn/05-supported-backends/rknn.md
index 165029e5b9..159d871866 100644
--- a/docs/zh_cn/05-supported-backends/rknn.md
+++ b/docs/zh_cn/05-supported-backends/rknn.md
@@ -2,7 +2,7 @@
Currently, MMDeploy has only been tested on the rk3588 and rv1126 Linux platforms.
-The following features need to be configured manually in MMDeploy, as in [this config](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/_base_/backends/rknn.py); see the sketch after this list.
+The following features need to be configured manually in MMDeploy, as in [this config](https://github.com/open-mmlab/mmdeploy/tree/main/configs/_base_/backends/rknn.py); see the sketch after this list.
- target_platform != default
- quantization settings
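For orientation, a backend config of that kind might look roughly like the sketch below. This is an illustration under assumptions, not the contents of the linked rknn.py; every value shown is a placeholder.

```python
# Hypothetical sketch of an rknn backend config; all values are placeholders.
backend_config = dict(
    type='rknn',
    common_config=dict(
        target_platform='rv1126',  # set explicitly when target_platform != default
        optimization_level=1),
    quantization_config=dict(
        do_quantization=False,     # quantization settings live here
        dataset=None))
```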
diff --git a/docs/zh_cn/06-custom-ops/tensorrt.md b/docs/zh_cn/06-custom-ops/tensorrt.md
index e11b837127..75f353dcb2 100644
--- a/docs/zh_cn/06-custom-ops/tensorrt.md
+++ b/docs/zh_cn/06-custom-ops/tensorrt.md
@@ -302,7 +302,7 @@ Perform RoIAlign on output feature, used in bbox_head of most two-stage detector
#### Description
-ScatterND takes three inputs: a `data` tensor of rank r >= 1, an `indices` tensor of rank q >= 1, and an `updates` tensor of rank q + r - indices.shape\[-1\] - 1. The output of the operation is produced by creating a copy of the input `data` and then updating its values at the index positions specified by `indices` to the values specified by `updates`. Its output shape is the same as the shape of `data`. Note that `indices` must not have duplicate entries; that is, two or more updates to the same index location are not supported.
+ScatterND takes three inputs: a `data` tensor of rank r >= 1, an `indices` tensor of rank q >= 1, and an `updates` tensor of rank q + r - indices.shape[-1] - 1. The output of the operation is produced by creating a copy of the input `data` and then updating its values at the index positions specified by `indices` to the values specified by `updates`. Its output shape is the same as the shape of `data`. Note that `indices` must not have duplicate entries; that is, two or more updates to the same index location are not supported.
The `output` is calculated via the following equation:
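The equation itself falls outside this hunk. As a plain illustration of the semantics described above (the `scatter_nd` helper is hypothetical, not the MMDeploy plugin code), here is a NumPy sketch of ScatterND:

```python
import numpy as np

def scatter_nd(data: np.ndarray, indices: np.ndarray, updates: np.ndarray) -> np.ndarray:
    """Reference ScatterND: copy `data`, then write `updates` at `indices`."""
    output = np.copy(data)
    # Each indices[i_0, ..., i_{q-2}, :] addresses one slice of `output`
    # of rank r - indices.shape[-1]; iterate over the leading q-1 dims.
    for idx in np.ndindex(indices.shape[:-1]):
        output[tuple(indices[idx])] = updates[idx]
    return output

# Rank-1 example: scatter two scalar updates into a length-8 vector.
data = np.arange(8, dtype=np.float32)            # [0. 1. 2. 3. 4. 5. 6. 7.]
indices = np.array([[4], [3]], dtype=np.int64)   # q = 2, indices.shape[-1] = 1
updates = np.array([9.0, 10.0], dtype=np.float32)
print(scatter_nd(data, indices, updates))        # [0. 1. 2. 10. 9. 5. 6. 7.]
```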
diff --git a/docs/zh_cn/conf.py b/docs/zh_cn/conf.py
index fd8ab6b95f..c060165f55 100644
--- a/docs/zh_cn/conf.py
+++ b/docs/zh_cn/conf.py
@@ -106,7 +106,7 @@
# documentation.
#
html_theme_options = {
- 'logo_url': 'https://mmdeploy.readthedocs.io/zh_CN/1.x/',
+ 'logo_url': 'https://mmdeploy.readthedocs.io/zh_CN/latest/',
'menu': [{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmdeploy'
diff --git a/docs/zh_cn/faq.md b/docs/zh_cn/faq.md
index 67615b1d14..92e6cb4ef6 100644
--- a/docs/zh_cn/faq.md
+++ b/docs/zh_cn/faq.md
@@ -6,7 +6,7 @@
Fp16 mode requires a device with full-rate fp16 support.
-- "error: parameter check failed at: engine.cpp::setBindingDimensions::1046, condition: profileMinDims.d\[i\] \<= dimensions.d\[i\]"
+- "error: parameter check failed at: engine.cpp::setBindingDimensions::1046, condition: profileMinDims.d[i] \<= dimensions.d[i]"
When building an `ICudaEngine` from an `INetworkDefinition` that has dynamically resizable inputs, users need to specify at least one optimization profile, which can be set in the deploy config:
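The config block itself lies outside this hunk. As a sketch, the optimization profile in an mmdeploy TensorRT deploy config typically takes roughly this shape; the input name and the concrete shapes below are placeholder assumptions:

```python
backend_config = dict(
    common_config=dict(max_workspace_size=1 << 30),
    model_inputs=[
        dict(
            input_shapes=dict(
                input=dict(                       # name of the model input
                    min_shape=[1, 3, 320, 320],   # lower bound of the profile
                    opt_shape=[1, 3, 800, 1344],  # shape the engine is tuned for
                    max_shape=[1, 3, 1344, 1344]  # upper bound of the profile
                )))
    ])
```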
@@ -25,7 +25,7 @@
The input tensor shape must lie between `min_shape` and `max_shape`.
-- "error: \[TensorRT\] INTERNAL ERROR: Assertion failed: cublasStatus == CUBLAS_STATUS_SUCCESS"
+- "error: [TensorRT] INTERNAL ERROR: Assertion failed: cublasStatus == CUBLAS_STATUS_SUCCESS"
TRT 7.2.1 switches to cuBLASLt (previously cuBLAS). cuBLASLt is the default choice for SM version >= 7.0. You may need CUDA-10.2 Patch 1 (released Aug 26, 2020) to resolve some cuBLASLt issues. Another option is to use the new TacticSource API and disable the cuBLASLt tactics if you don't want to upgrade.
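As a sketch of that second option (assuming TensorRT >= 7.2, where the TacticSource API is available), disabling the cuBLASLt tactics via the Python API looks roughly like this:

```python
import tensorrt as trt

logger = trt.Logger(trt.Logger.WARNING)
builder = trt.Builder(logger)
config = builder.create_builder_config()
# Keep cuBLAS and cuDNN as tactic sources, but leave cuBLASLt out of the mask.
tactics = (1 << int(trt.TacticSource.CUBLAS)) | (1 << int(trt.TacticSource.CUDNN))
config.set_tactic_sources(tactics)
```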
diff --git a/docs/zh_cn/get_started.md b/docs/zh_cn/get_started.md
index 4402c6f0e7..27b4e55245 100644
--- a/docs/zh_cn/get_started.md
+++ b/docs/zh_cn/get_started.md
@@ -113,14 +113,14 @@ mim install "mmcv>=2.0.0rc2"
```shell
# 1. Install the MMDeploy model converter (includes the custom TensorRT/ONNX Runtime ops)
-pip install mmdeploy==1.0.0rc3
+pip install mmdeploy==1.0.0
# 2. Install the MMDeploy SDK inference tools
# Pick either one below, depending on whether you need GPU inference
# 2.1 Support onnxruntime inference
-pip install mmdeploy-runtime==1.0.0rc3
+pip install mmdeploy-runtime==1.0.0
# 2.2 Support onnxruntime-gpu and tensorrt inference
-pip install mmdeploy-runtime-gpu==1.0.0rc3
+pip install mmdeploy-runtime-gpu==1.0.0
# 3. Install the inference engines
# 3.1 Install the TensorRT inference engine
@@ -165,7 +165,7 @@ export LD_LIBRARY_PATH=$ONNXRUNTIME_DIR/lib:$LD_LIBRARY_PATH
```shell
# Clone the mmdeploy repo. Its config files are needed to build the conversion pipeline; `--recursive` is not required
-git clone -b dev-1.x --recursive https://github.com/open-mmlab/mmdeploy.git
+git clone -b main --recursive https://github.com/open-mmlab/mmdeploy.git
# Install mmdetection. Its model config files are needed to build the PyTorch nn module during conversion
git clone -b 3.x https://github.com/open-mmlab/mmdetection.git
@@ -223,10 +223,10 @@ result = inference_model(
You can directly run the demo programs in the prebuilt package: feed them an SDK model and an image to run inference and inspect the result.
```shell
-wget https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0rc3/mmdeploy-1.0.0rc3-linux-x86_64-cuda11.3.tar.gz
-tar xf mmdeploy-1.0.0rc3-linux-x86_64-cuda11.3
+wget https://github.com/open-mmlab/mmdeploy/releases/download/v1.0.0/mmdeploy-1.0.0-linux-x86_64-cuda11.3.tar.gz
+tar xf mmdeploy-1.0.0-linux-x86_64-cuda11.3.tar.gz
-cd mmdeploy-1.0.0rc3-linux-x86_64-cuda11.3
+cd mmdeploy-1.0.0-linux-x86_64-cuda11.3
# Run the python demo
python example/python/object_detection.py cuda ../mmdeploy_model/faster-rcnn ../mmdetection/demo/demo.jpg
# Run the C/C++ demo
@@ -268,7 +268,7 @@ for index, bbox, label_id in zip(indices, bboxes, labels):
cv2.imwrite('output_detection.png', img)
```
-For more examples, please see [here](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo/python).
+For more examples, please see [here](https://github.com/open-mmlab/mmdeploy/tree/main/demo/python).
#### C++ API
@@ -322,9 +322,9 @@ target_link_libraries(${name} PRIVATE mmdeploy ${OpenCV_LIBS})
```
When building, pass the path containing MMDeployConfig.cmake via -DMMDeploy_DIR; it lives under sdk/lib/cmake/MMDeploy in the prebuilt package.
-For more examples, please see [here](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo/csrc/cpp).
+For more examples, please see [here](https://github.com/open-mmlab/mmdeploy/tree/main/demo/csrc/cpp).
-For the usage of the C, C# and Java APIs, please read the code of the [C demos](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo/csrc/c), [C# demos](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo/csharp) and [Java demos](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo/java) respectively.
+For the usage of the C, C# and Java APIs, please read the code of the [C demos](https://github.com/open-mmlab/mmdeploy/tree/main/demo/csrc/c), [C# demos](https://github.com/open-mmlab/mmdeploy/tree/main/demo/csharp) and [Java demos](https://github.com/open-mmlab/mmdeploy/tree/main/demo/java) respectively.
We will describe their usage in detail in future releases.
#### Accelerated preprocessing (experimental)
diff --git a/docs/zh_cn/switch_language.md b/docs/zh_cn/switch_language.md
index 03bf778b72..1b5df42929 100644
--- a/docs/zh_cn/switch_language.md
+++ b/docs/zh_cn/switch_language.md
@@ -1,3 +1,3 @@
-## English
+## English
-## 简体中文
+## 简体中文
diff --git a/docs/zh_cn/tutorial/02_challenges.md b/docs/zh_cn/tutorial/02_challenges.md
index e3d0e37970..85e4b0c23a 100644
--- a/docs/zh_cn/tutorial/02_challenges.md
+++ b/docs/zh_cn/tutorial/02_challenges.md
@@ -1,6 +1,6 @@
# Chapter 2: Solving the Challenges in Model Deployment
-In [Chapter 1](https://mmdeploy.readthedocs.io/zh_CN/1.x/tutorial/01_introduction_to_model_deployment.html), we deployed a simple super-resolution model and everything went smoothly. That model still had a flaw, though: the upscaling factor was fixed at 4, so images could not be enlarged by an arbitrary factor. Now, let us try to deploy a model that supports dynamic upscaling factors and get a feel for the difficulties that can come up in model deployment.
+In [Chapter 1](https://mmdeploy.readthedocs.io/zh_CN/latest/tutorial/01_introduction_to_model_deployment.html), we deployed a simple super-resolution model and everything went smoothly. That model still had a flaw, though: the upscaling factor was fixed at 4, so images could not be enlarged by an arbitrary factor. Now, let us try to deploy a model that supports dynamic upscaling factors and get a feel for the difficulties that can come up in model deployment.
## Common challenges in model deployment
@@ -10,7 +10,7 @@
- Implementing new operators. Deep learning evolves rapidly, and new operators are often proposed faster than the ONNX maintainers can support them. To deploy the latest models, deployment engineers often have to add support for new operators to ONNX and to the inference engines themselves.
- Compatibility between the intermediate representation and the inference engines. Because every inference engine is implemented differently, uniform support for ONNX is hard to achieve. To ensure a model behaves the same across inference engines, deployment engineers often have to customize the model code for a particular engine, which adds a lot of deployment work.
-We will cover the solutions to these problems in detail in later tutorials. If terms such as ONNX, inference engine, intermediate representation and operator sound unfamiliar, don't worry: you can read [Chapter 1](https://mmdeploy.readthedocs.io/zh_CN/1.x/tutorial/01_introduction_to_model_deployment.html) to learn the relevant concepts.
+We will cover the solutions to these problems in detail in later tutorials. If terms such as ONNX, inference engine, intermediate representation and operator sound unfamiliar, don't worry: you can read [Chapter 1](https://mmdeploy.readthedocs.io/zh_CN/latest/tutorial/01_introduction_to_model_deployment.html) to learn the relevant concepts.
Now, let us make some small changes to the original SRCNN model, experience the difficulties that a dynamic model brings to deployment, and learn one way to solve them.
@@ -38,7 +38,7 @@ def init_torch_model():
Now, suppose we are building a super-resolution application. Our users want the upscaling factor to be freely configurable, while all we hand them is a single .onnx file and an application that runs the super-resolution model. We therefore need to change the upscaling factor without modifying the .onnx file.
-So we must modify the original model and turn the upscaling factor into an inference-time input. Building on the Python script from [Chapter 1](https://mmdeploy.readthedocs.io/zh_CN/1.x/tutorial/01_introduction_to_model_deployment.html), we make a few changes and arrive at the following script:
+So we must modify the original model and turn the upscaling factor into an inference-time input. Building on the Python script from [Chapter 1](https://mmdeploy.readthedocs.io/zh_CN/latest/tutorial/01_introduction_to_model_deployment.html), we make a few changes and arrive at the following script:
```python
import torch
@@ -75,7 +75,7 @@ def init_torch_model():
torch_model = SuperResolutionNet()
# Please read the code about downloading 'srcnn.pth' and 'face.png' in
- # https://mmdeploy.readthedocs.io/zh_CN/1.x/tutorial/01_introduction_to_model_deployment.html#pytorch
+ # https://mmdeploy.readthedocs.io/zh_CN/latest/tutorial/01_introduction_to_model_deployment.html#pytorch
state_dict = torch.load('srcnn.pth')['state_dict']
# Adapt the checkpoint
@@ -173,7 +173,7 @@ with torch.no_grad():
![image](https://user-images.githubusercontent.com/4560679/157626910-de33365c-b60a-49f4-ada7-157111afa6e2.png)
-Expanding scales, we can see that scales is a 1-D tensor of length 4 with contents \[1, 1, 3, 3\], the scaling factor of the Resize operation along each dimension; its type is Initializer, meaning the value is initialized directly from a constant. If we could generate an ONNX Resize operator ourselves whose scales is a variable rather than a constant, just like the X above it, this super-resolution model would be able to scale dynamically.
+Expanding scales, we can see that scales is a 1-D tensor of length 4 with contents [1, 1, 3, 3], the scaling factor of the Resize operation along each dimension; its type is Initializer, meaning the value is initialized directly from a constant. If we could generate an ONNX Resize operator ourselves whose scales is a variable rather than a constant, just like the X above it, this super-resolution model would be able to scale dynamically.
The existing PyTorch interpolation operators come with a fixed set of rules for mapping to the ONNX Resize operator, and the scales of the Resize operators produced this way can only be constants, which does not meet our needs. We have to define our own PyTorch interpolation operator and map it to the ONNX Resize operator we want.
@@ -294,9 +294,9 @@ class NewInterpolate(torch.autograd.Function):
align_corners=False)
```
-Before diving into the implementation of this operator, let us sort out our thinking. We want the new interpolation operator to have two inputs: the image to operate on and its scaling factor. As mentioned earlier, to match the scales parameter of the ONNX Resize operator, this scaling factor is a tensor of shape \[1, 1, x, x\], where x is the upscaling factor. In the earlier 3x model, this parameter was fixed to \[1, 1, 3, 3\]. So in the interpolation operator, we want the model's second input to be a \[1, 1, w, h\] tensor, where w and h are the upscaling factors of the image width and height respectively.
+Before diving into the implementation of this operator, let us sort out our thinking. We want the new interpolation operator to have two inputs: the image to operate on and its scaling factor. As mentioned earlier, to match the scales parameter of the ONNX Resize operator, this scaling factor is a tensor of shape [1, 1, x, x], where x is the upscaling factor. In the earlier 3x model, this parameter was fixed to [1, 1, 3, 3]. So in the interpolation operator, we want the model's second input to be a [1, 1, w, h] tensor, where w and h are the upscaling factors of the image width and height respectively.
-With the operator's inputs settled, let us look at its implementation. The inference behavior of an operator is decided by its forward method. The first argument of this method must be ctx, and the following arguments are the operator's custom inputs; we define two, the image being operated on and the scaling factor. To keep inference correct, the \[1, 1, w, h\] input has to be wired into the original interpolate function. Our approach is to slice out the last two elements of the input tensor and pass them as a list to interpolate's scale_factor parameter.
+With the operator's inputs settled, let us look at its implementation. The inference behavior of an operator is decided by its forward method. The first argument of this method must be ctx, and the following arguments are the operator's custom inputs; we define two, the image being operated on and the scaling factor. To keep inference correct, the [1, 1, w, h] input has to be wired into the original interpolate function. Our approach is to slice out the last two elements of the input tensor and pass them as a list to interpolate's scale_factor parameter.
Next, we have to decide how the new operator maps to ONNX operators. This mapping is determined by the operator's symbolic method. The first argument of symbolic must be g, and the following arguments are the operator's custom inputs, just as in forward. The concrete ONNX operator is defined via g.op, and each parameter of g.op maps to an attribute of the ONNX operator:
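Since the hunks above show only fragments of the operator, here is a condensed sketch of the custom operator just described; it may differ from the tutorial's full listing in details:

```python
import torch
from torch.nn.functional import interpolate

class NewInterpolate(torch.autograd.Function):

    @staticmethod
    def symbolic(g, input, scales):
        # Map to a single ONNX Resize op; scales stays a graph input
        # instead of being folded into a constant.
        return g.op('Resize',
                    input,
                    g.op('Constant',
                         value_t=torch.tensor([], dtype=torch.float32)),  # empty roi
                    scales,
                    coordinate_transformation_mode_s='pytorch_half_pixel',
                    cubic_coeff_a_f=-0.75,
                    mode_s='cubic',
                    nearest_mode_s='floor')

    @staticmethod
    def forward(ctx, input, scales):
        # scales holds [1, 1, w, h]; only the last two entries feed
        # interpolate's scale_factor, as explained above.
        scales = scales.tolist()[-2:]
        return interpolate(input,
                           scale_factor=scales,
                           mode='bicubic',
                           align_corners=False)
```

It is invoked as `NewInterpolate.apply(x, torch.tensor([1, 1, 3, 3], dtype=torch.float32))`, so the scale tensor remains an input of the exported graph.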
diff --git a/docs/zh_cn/tutorial/03_pytorch2onnx.md b/docs/zh_cn/tutorial/03_pytorch2onnx.md
index 5839c0afb4..1a30c9c9b1 100644
--- a/docs/zh_cn/tutorial/03_pytorch2onnx.md
+++ b/docs/zh_cn/tutorial/03_pytorch2onnx.md
@@ -228,7 +228,7 @@ class Model(torch.nn.Module):
return x
```
-Here, we clamp the values of the output tensor to \[0, 1\] only while the model is being exported. Using `is_in_onnx_export` does make it convenient to add deployment-related logic in the code. However, such code is unfriendly to developers and users who only care about model training, and the intrusive deployment logic hurts the overall readability of the code. Moreover, `is_in_onnx_export` can only "patch" each individual place where deployment logic is needed, which makes unified management difficult. We will later introduce how to avoid these problems with MMDeploy's rewriting mechanism.
+Here, we clamp the values of the output tensor to [0, 1] only while the model is being exported. Using `is_in_onnx_export` does make it convenient to add deployment-related logic in the code. However, such code is unfriendly to developers and users who only care about model training, and the intrusive deployment logic hurts the overall readability of the code. Moreover, `is_in_onnx_export` can only "patch" each individual place where deployment logic is needed, which makes unified management difficult. We will later introduce how to avoid these problems with MMDeploy's rewriting mechanism.
#### Exploiting operations that interrupt tensor tracing
diff --git a/docs/zh_cn/tutorial/06_introduction_to_tensorrt.md b/docs/zh_cn/tutorial/06_introduction_to_tensorrt.md
index dca251f127..755f230db4 100644
--- a/docs/zh_cn/tutorial/06_introduction_to_tensorrt.md
+++ b/docs/zh_cn/tutorial/06_introduction_to_tensorrt.md
@@ -44,7 +44,7 @@ python -c "import tensorrt;print(tensorrt.__version__)"
### Jetson
-For the Jetson platform, we provide a very detailed environment setup tutorial; see the [MMDeploy installation documentation](https://github.com/open-mmlab/mmdeploy/tree/1.x/docs/zh_cn/01-how-to-build/jetsons.md). Note that on Jetson the CUDA and TensorRT versions are tightly coupled to the JetPack version, so simply pick the versions matching your hardware. Once the environment is configured, check that the TensorRT version is correct with `python -c "import tensorrt;print(tensorrt.__version__)"`.
+For the Jetson platform, we provide a very detailed environment setup tutorial; see the [MMDeploy installation documentation](https://github.com/open-mmlab/mmdeploy/tree/main/docs/zh_cn/01-how-to-build/jetsons.md). Note that on Jetson the CUDA and TensorRT versions are tightly coupled to the JetPack version, so simply pick the versions matching your hardware. Once the environment is configured, check that the TensorRT version is correct with `python -c "import tensorrt;print(tensorrt.__version__)"`.
## Model building
diff --git a/mmdeploy/version.py b/mmdeploy/version.py
index b9b90e63ca..d5c458e274 100644
--- a/mmdeploy/version.py
+++ b/mmdeploy/version.py
@@ -1,7 +1,7 @@
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Tuple
-__version__ = '1.0.0rc3'
+__version__ = '1.0.0'
short_version = __version__
diff --git a/requirements.txt b/requirements.txt
index 5f50cbdc09..b5b5d97a6e 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,3 +1,4 @@
-r requirements/build.txt
-r requirements/runtime.txt
-r requirements/tests.txt
+-r requirements/optional.txt
diff --git a/requirements/backends.txt b/requirements/backends.txt
index 6143ebd459..306b2c1528 100644
--- a/requirements/backends.txt
+++ b/requirements/backends.txt
@@ -1,2 +1,2 @@
onnxruntime>=1.8.0
-openvino-dev
+openvino-dev>=2022.3.0
diff --git a/requirements/codebases.txt b/requirements/codebases.txt
index 027c7bb346..2d1a8db3cf 100644
--- a/requirements/codebases.txt
+++ b/requirements/codebases.txt
@@ -5,4 +5,4 @@ mmedit>=1.0.0rc2
mmocr>=1.0.0rc4
mmpose>=1.0.0rc0
mmrotate>=1.0.0rc0
-mmsegmentation>=1.0.0rc0
+mmsegmentation @ git+https://github.com/open-mmlab/mmsegmentation.git@dev-1.x
diff --git a/requirements/optional.txt b/requirements/optional.txt
index 9c445a2534..bd3e5c6e86 100644
--- a/requirements/optional.txt
+++ b/requirements/optional.txt
@@ -1,4 +1,2 @@
--r requirements/codebases.txt
--r requirements/backends.txt
h5py
tqdm
diff --git a/tools/package_tools/packaging/mmdeploy_runtime/version.py b/tools/package_tools/packaging/mmdeploy_runtime/version.py
index 5ad5d727cf..8b33ac0e6c 100644
--- a/tools/package_tools/packaging/mmdeploy_runtime/version.py
+++ b/tools/package_tools/packaging/mmdeploy_runtime/version.py
@@ -1,2 +1,2 @@
# Copyright (c) OpenMMLab. All rights reserved.
-__version__ = '1.0.0rc3'
+__version__ = '1.0.0'
diff --git a/tools/package_tools/test/test_sdk.sh b/tools/package_tools/test/test_sdk.sh
index 57e27de752..472a8d5b4c 100755
--- a/tools/package_tools/test/test_sdk.sh
+++ b/tools/package_tools/test/test_sdk.sh
@@ -18,7 +18,7 @@ cp -r $test_pkg $work_dir
pushd $work_dir
# opencv
-if [[ ! -d $OpenCV_DIR ]]; then
+if [ ! -d "$OpenCV_DIR" ]; then
./install_opencv.sh
fi
diff --git a/tools/scripts/ubuntu_utils.py b/tools/scripts/ubuntu_utils.py
index 742e069629..31cea14037 100644
--- a/tools/scripts/ubuntu_utils.py
+++ b/tools/scripts/ubuntu_utils.py
@@ -29,8 +29,6 @@ def get_job(argv) -> int:
nproc = cmd_result('nproc')
if nproc is not None and len(nproc) > 0:
job = max(int(nproc) - 2, 1)
- else:
- job = 1
else:
job = int(argv[1])
return job