diff --git a/ppgan/utils/setup.py b/ppgan/utils/setup.py
index 78df10923..c5c1741c2 100644
--- a/ppgan/utils/setup.py
+++ b/ppgan/utils/setup.py
@@ -42,6 +42,8 @@ def setup(args, cfg):
 
     if paddle.is_compiled_with_cuda():
         paddle.set_device('gpu')
+    elif paddle.is_compiled_with_npu():
+        paddle.set_device('npu')
     else:
         paddle.set_device('cpu')
 
diff --git a/test_tipc/prepare_npu_test.sh b/test_tipc/prepare_npu_test.sh
new file mode 100644
index 000000000..80faa4603
--- /dev/null
+++ b/test_tipc/prepare_npu_test.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+BASEDIR=$(dirname "$0")
+
+function readlinkf() {
+    perl -MCwd -e 'print Cwd::abs_path shift' "$1";
+}
+
+REPO_ROOT_PATH=$(readlinkf ${BASEDIR}/../)
+
+config_files=$(find ${REPO_ROOT_PATH}/test_tipc/configs -name "train_infer_python.txt")
+for file in ${config_files}; do
+    echo $file
+    sed -i 's/--device:gpu/--device:npu/g' $file
+done
diff --git a/test_tipc/test_train_inference_python.sh b/test_tipc/test_train_inference_python.sh
index bc54fac8f..f84d0acea 100644
--- a/test_tipc/test_train_inference_python.sh
+++ b/test_tipc/test_train_inference_python.sh
@@ -241,9 +241,9 @@ else
    if [ ${#gpu} -le 2 ];then  # train with cpu or single gpu
        cmd="${python} ${run_train} ${set_use_gpu} ${set_save_model} ${set_train_params1} ${set_epoch} ${set_pretrain} ${set_autocast} ${set_batchsize} ${set_amp_config} "
    elif [ ${#ips} -le 26 ];then  # train with multi-gpu
-       cmd="${python} -m paddle.distributed.launch --gpus=${gpu} ${run_train} ${set_use_gpu} ${set_save_model} ${set_train_params1} ${set_epoch} ${set_pretrain} ${set_autocast} ${set_batchsize} ${set_amp_config}"
+       cmd="${python} -m paddle.distributed.launch --devices=${gpu} ${run_train} ${set_use_gpu} ${set_save_model} ${set_train_params1} ${set_epoch} ${set_pretrain} ${set_autocast} ${set_batchsize} ${set_amp_config}"
    else  # train with multi-machine
-       cmd="${python} -m paddle.distributed.launch --ips=${ips} --gpus=${gpu} ${run_train} ${set_use_gpu} ${set_save_model} ${set_train_params1} ${set_pretrain} ${set_epoch} ${set_autocast} ${set_batchsize} ${set_amp_config}"
+       cmd="${python} -m paddle.distributed.launch --ips=${ips} --devices=${gpu} ${run_train} ${set_use_gpu} ${set_save_model} ${set_train_params1} ${set_pretrain} ${set_epoch} ${set_autocast} ${set_batchsize} ${set_amp_config}"
    fi
    # run train
    eval "unset CUDA_VISIBLE_DEVICES"
diff --git a/tools/inference.py b/tools/inference.py
index 6cb4698c4..92c832b12 100644
--- a/tools/inference.py
+++ b/tools/inference.py
@@ -41,8 +41,8 @@ def parse_args():
         "--device",
         default="gpu",
         type=str,
-        choices=["cpu", "gpu", "xpu"],
-        help="The device to select to train the model, is must be cpu/gpu/xpu.")
+        choices=["cpu", "gpu", "xpu", "npu"],
+        help="The device to select to train the model, it must be cpu/gpu/xpu/npu.")
     parser.add_argument('-c',
                         '--config-file',
                         metavar="FILE",
@@ -117,6 +117,8 @@ def create_predictor(model_path,
         config.disable_gpu()
     elif device == "xpu":
         config.enable_xpu(100)
+    elif device == "npu":
+        config.enable_npu()
     else:
         config.disable_gpu()